libstoragemgmt-1.2.3/0000775000175000017500000000000012542455463011552 500000000000000libstoragemgmt-1.2.3/aclocal.m40000664000175000017500000016253612542455444013346 00000000000000# generated automatically by aclocal 1.13.4 -*- Autoconf -*- # Copyright (C) 1996-2013 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],, [m4_warning([this file was generated for autoconf 2.69. You have another version of autoconf. It may work, but is not guaranteed to. If you have problems, you may need to regenerate the build system entirely. To do so, use the procedure documented by the package, typically 'autoreconf'.])]) # pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*- # serial 1 (pkg-config-0.24) # # Copyright © 2004 Scott James Remnant . # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # PKG_PROG_PKG_CONFIG([MIN-VERSION]) # ---------------------------------- AC_DEFUN([PKG_PROG_PKG_CONFIG], [m4_pattern_forbid([^_?PKG_[A-Z_]+$]) m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$]) m4_pattern_allow([^PKG_CONFIG_(DISABLE_UNINSTALLED|TOP_BUILD_DIR|DEBUG_SPEW)$]) AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility]) AC_ARG_VAR([PKG_CONFIG_PATH], [directories to add to pkg-config's search path]) AC_ARG_VAR([PKG_CONFIG_LIBDIR], [path overriding pkg-config's built-in search path]) if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) fi if test -n "$PKG_CONFIG"; then _pkg_min_version=m4_default([$1], [0.9.0]) AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) PKG_CONFIG="" fi fi[]dnl ])# PKG_PROG_PKG_CONFIG # PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) # # Check to see whether a particular set of modules exists. Similar # to PKG_CHECK_MODULES(), but does not set variables or print errors. 
# # Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG]) # only at the first occurence in configure.ac, so if the first place # it's called might be skipped (such as if it is within an "if", you # have to call PKG_CHECK_EXISTS manually # -------------------------------------------------------------- AC_DEFUN([PKG_CHECK_EXISTS], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl if test -n "$PKG_CONFIG" && \ AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then m4_default([$2], [:]) m4_ifvaln([$3], [else $3])dnl fi]) # _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) # --------------------------------------------- m4_define([_PKG_CONFIG], [if test -n "$$1"; then pkg_cv_[]$1="$$1" elif test -n "$PKG_CONFIG"; then PKG_CHECK_EXISTS([$3], [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes ], [pkg_failed=yes]) else pkg_failed=untried fi[]dnl ])# _PKG_CONFIG # _PKG_SHORT_ERRORS_SUPPORTED # ----------------------------- AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], [AC_REQUIRE([PKG_PROG_PKG_CONFIG]) if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi[]dnl ])# _PKG_SHORT_ERRORS_SUPPORTED # PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], # [ACTION-IF-NOT-FOUND]) # # # Note that if there is a possibility the first call to # PKG_CHECK_MODULES might not happen, you should be sure to include an # explicit call to PKG_PROG_PKG_CONFIG in your configure.ac # # # -------------------------------------------------------------- AC_DEFUN([PKG_CHECK_MODULES], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl pkg_failed=no AC_MSG_CHECKING([for $1]) _PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) _PKG_CONFIG([$1][_LIBS], [libs], [$2]) m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS and $1[]_LIBS 
to avoid the need to call pkg-config. See the pkg-config man page for more details.]) if test $pkg_failed = yes; then AC_MSG_RESULT([no]) _PKG_SHORT_ERRORS_SUPPORTED if test $_pkg_short_errors_supported = yes; then $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$2" 2>&1` else $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$2" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD m4_default([$4], [AC_MSG_ERROR( [Package requirements ($2) were not met: $$1_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. _PKG_TEXT])[]dnl ]) elif test $pkg_failed = untried; then AC_MSG_RESULT([no]) m4_default([$4], [AC_MSG_FAILURE( [The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. _PKG_TEXT To get pkg-config, see .])[]dnl ]) else $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS $1[]_LIBS=$pkg_cv_[]$1[]_LIBS AC_MSG_RESULT([yes]) $3 fi[]dnl ])# PKG_CHECK_MODULES # PKG_INSTALLDIR(DIRECTORY) # ------------------------- # Substitutes the variable pkgconfigdir as the location where a module # should install pkg-config .pc files. By default the directory is # $libdir/pkgconfig, but the default can be changed by passing # DIRECTORY. The user can override through the --with-pkgconfigdir # parameter. 
AC_DEFUN([PKG_INSTALLDIR], [m4_pushdef([pkg_default], [m4_default([$1], ['${libdir}/pkgconfig'])]) m4_pushdef([pkg_description], [pkg-config installation directory @<:@]pkg_default[@:>@]) AC_ARG_WITH([pkgconfigdir], [AS_HELP_STRING([--with-pkgconfigdir], pkg_description)],, [with_pkgconfigdir=]pkg_default) AC_SUBST([pkgconfigdir], [$with_pkgconfigdir]) m4_popdef([pkg_default]) m4_popdef([pkg_description]) ]) dnl PKG_INSTALLDIR # PKG_NOARCH_INSTALLDIR(DIRECTORY) # ------------------------- # Substitutes the variable noarch_pkgconfigdir as the location where a # module should install arch-independent pkg-config .pc files. By # default the directory is $datadir/pkgconfig, but the default can be # changed by passing DIRECTORY. The user can override through the # --with-noarch-pkgconfigdir parameter. AC_DEFUN([PKG_NOARCH_INSTALLDIR], [m4_pushdef([pkg_default], [m4_default([$1], ['${datadir}/pkgconfig'])]) m4_pushdef([pkg_description], [pkg-config arch-independent installation directory @<:@]pkg_default[@:>@]) AC_ARG_WITH([noarch-pkgconfigdir], [AS_HELP_STRING([--with-noarch-pkgconfigdir], pkg_description)],, [with_noarch_pkgconfigdir=]pkg_default) AC_SUBST([noarch_pkgconfigdir], [$with_noarch_pkgconfigdir]) m4_popdef([pkg_default]) m4_popdef([pkg_description]) ]) dnl PKG_NOARCH_INSTALLDIR # Copyright (C) 2002-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_AUTOMAKE_VERSION(VERSION) # ---------------------------- # Automake X.Y traces this macro to ensure aclocal.m4 has been # generated from the m4 files accompanying Automake X.Y. # (This private macro should not be called outside this file.) AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version='1.13' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. 
m4_if([$1], [1.13.4], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) # _AM_AUTOCONF_VERSION(VERSION) # ----------------------------- # aclocal traces this macro to find the Autoconf version. # This is a private macro too. Using m4_define simplifies # the logic in aclocal, which can simply ignore this definition. m4_define([_AM_AUTOCONF_VERSION], []) # AM_SET_CURRENT_AUTOMAKE_VERSION # ------------------------------- # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], [AM_AUTOMAKE_VERSION([1.13.4])dnl m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- # Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets # $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to # '$srcdir', '$srcdir/..', or '$srcdir/../..'. # # Of course, Automake must honor this variable whenever it calls a # tool from the auxiliary directory. The problem is that $srcdir (and # therefore $ac_aux_dir as well) can be either absolute or relative, # depending on how configure is run. This is pretty annoying, since # it makes $ac_aux_dir quite unusable in subdirectories: in the top # source directory, any form will work fine, but in subdirectories a # relative path needs to be adjusted first. 
# # $ac_aux_dir/missing # fails when called from a subdirectory if $ac_aux_dir is relative # $top_srcdir/$ac_aux_dir/missing # fails if $ac_aux_dir is absolute, # fails when called from a subdirectory in a VPATH build with # a relative $ac_aux_dir # # The reason of the latter failure is that $top_srcdir and $ac_aux_dir # are both prefixed by $srcdir. In an in-source build this is usually # harmless because $srcdir is '.', but things will broke when you # start a VPATH build or use an absolute $srcdir. # # So we could use something similar to $top_srcdir/$ac_aux_dir/missing, # iff we strip the leading $srcdir from $ac_aux_dir. That would be: # am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` # and then we would define $MISSING as # MISSING="\${SHELL} $am_aux_dir/missing" # This will work as long as MISSING is not called from configure, because # unfortunately $(top_srcdir) has no meaning in configure. # However there are other variables, like CC, which are often used in # configure, and could therefore not use this "fixed" $ac_aux_dir. # # Another solution, used here, is to always expand $ac_aux_dir to an # absolute PATH. The drawback is that using absolute paths prevent a # configured tree to be moved without reconfiguration. AC_DEFUN([AM_AUX_DIR_EXPAND], [dnl Rely on autoconf to set up CDPATH properly. AC_PREREQ([2.50])dnl # expand $ac_aux_dir to an absolute path am_aux_dir=`cd $ac_aux_dir && pwd` ]) # AM_CONDITIONAL -*- Autoconf -*- # Copyright (C) 1997-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_CONDITIONAL(NAME, SHELL-CONDITION) # ------------------------------------- # Define a conditional. 
AC_DEFUN([AM_CONDITIONAL], [AC_PREREQ([2.52])dnl m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl AC_SUBST([$1_TRUE])dnl AC_SUBST([$1_FALSE])dnl _AM_SUBST_NOTMAKE([$1_TRUE])dnl _AM_SUBST_NOTMAKE([$1_FALSE])dnl m4_define([_AM_COND_VALUE_$1], [$2])dnl if $2; then $1_TRUE= $1_FALSE='#' else $1_TRUE='#' $1_FALSE= fi AC_CONFIG_COMMANDS_PRE( [if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then AC_MSG_ERROR([[conditional "$1" was never defined. Usually this means the macro was only invoked conditionally.]]) fi])]) # Copyright (C) 1999-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be # written in clear, in which case automake, when reading aclocal.m4, # will think it sees a *use*, and therefore will trigger all it's # C support machinery. Also note that it means that autoscan, seeing # CC etc. in the Makefile, will ask for an AC_PROG_CC use... # _AM_DEPENDENCIES(NAME) # ---------------------- # See how the compiler implements dependency checking. # NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC". # We try a few techniques and use that to set a single cache variable. # # We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was # modified to invoke _AM_DEPENDENCIES(CC); we would have a circular # dependency, and given that the user is not expected to run this macro, # just rely on AC_PROG_CC. 
AC_DEFUN([_AM_DEPENDENCIES], [AC_REQUIRE([AM_SET_DEPDIR])dnl AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl AC_REQUIRE([AM_MAKE_INCLUDE])dnl AC_REQUIRE([AM_DEP_TRACK])dnl m4_if([$1], [CC], [depcc="$CC" am_compiler_list=], [$1], [CXX], [depcc="$CXX" am_compiler_list=], [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'], [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'], [$1], [UPC], [depcc="$UPC" am_compiler_list=], [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'], [depcc="$$1" am_compiler_list=]) AC_CACHE_CHECK([dependency style of $depcc], [am_cv_$1_dependencies_compiler_type], [if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. 
mkdir sub am_cv_$1_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` fi am__universal=false m4_case([$1], [CC], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac], [CXX], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac]) for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. 
am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_$1_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_$1_dependencies_compiler_type=none fi ]) AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) AM_CONDITIONAL([am__fastdep$1], [ test "x$enable_dependency_tracking" != xno \ && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) ]) # AM_SET_DEPDIR # ------------- # Choose a directory name for dependency files. # This macro is AC_REQUIREd in _AM_DEPENDENCIES. 
AC_DEFUN([AM_SET_DEPDIR], [AC_REQUIRE([AM_SET_LEADING_DOT])dnl AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl ]) # AM_DEP_TRACK # ------------ AC_DEFUN([AM_DEP_TRACK], [AC_ARG_ENABLE([dependency-tracking], [dnl AS_HELP_STRING( [--enable-dependency-tracking], [do not reject slow dependency extractors]) AS_HELP_STRING( [--disable-dependency-tracking], [speeds up one-time build])]) if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) AC_SUBST([AMDEPBACKSLASH])dnl _AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl AC_SUBST([am__nodep])dnl _AM_SUBST_NOTMAKE([am__nodep])dnl ]) # Generate code to set up dependency tracking. -*- Autoconf -*- # Copyright (C) 1999-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_OUTPUT_DEPENDENCY_COMMANDS # ------------------------------ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], [{ # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. # We used to match only the files named 'Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. 
if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`AS_DIRNAME("$mf")` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running 'make'. DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "$am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`AS_DIRNAME(["$file"])` AS_MKDIR_P([$dirpart/$fdir]) # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ])# _AM_OUTPUT_DEPENDENCY_COMMANDS # AM_OUTPUT_DEPENDENCY_COMMANDS # ----------------------------- # This macro should only be invoked once -- use via AC_REQUIRE. # # This code is only required when automatic dependency tracking # is enabled. FIXME. This creates each '.P' file that we will # need in order to bootstrap the dependency handling code. AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], [AC_CONFIG_COMMANDS([depfiles], [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) ]) # Do all the work for Automake. -*- Autoconf -*- # Copyright (C) 1996-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This macro actually does too much. Some checks are only needed if # your package does certain things. But this isn't really a big deal. 
# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) # AM_INIT_AUTOMAKE([OPTIONS]) # ----------------------------------------------- # The call with PACKAGE and VERSION arguments is the old style # call (pre autoconf-2.50), which is being phased out. PACKAGE # and VERSION should now be passed to AC_INIT and removed from # the call to AM_INIT_AUTOMAKE. # We support both call styles for the transition. After # the next Automake release, Autoconf can make the AC_INIT # arguments mandatory, and then we can depend on a new Autoconf # release and drop the old call support. AC_DEFUN([AM_INIT_AUTOMAKE], [AC_PREREQ([2.65])dnl dnl Autoconf wants to disallow AM_ names. We explicitly allow dnl the ones we care about. m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl # test to see if srcdir already configured if test -f $srcdir/config.status; then AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi AC_SUBST([CYGPATH_W]) # Define the identity of the package. dnl Distinguish between old-style and new-style calls. m4_ifval([$2], [AC_DIAGNOSE([obsolete], [$0: two- and three-arguments forms are deprecated.]) m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl AC_SUBST([PACKAGE], [$1])dnl AC_SUBST([VERSION], [$2])], [_AM_SET_OPTIONS([$1])dnl dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. 
m4_if( m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]), [ok:ok],, [m4_fatal([AC_INIT should be called with package and version arguments])])dnl AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl _AM_IF_OPTION([no-define],, [AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package]) AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl # Some tools Automake needs. AC_REQUIRE([AM_SANITY_CHECK])dnl AC_REQUIRE([AC_ARG_PROGRAM])dnl AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}]) AM_MISSING_PROG([AUTOCONF], [autoconf]) AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}]) AM_MISSING_PROG([AUTOHEADER], [autoheader]) AM_MISSING_PROG([MAKEINFO], [makeinfo]) AC_REQUIRE([AM_PROG_INSTALL_SH])dnl AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: # # AC_SUBST([mkdir_p], ['$(MKDIR_P)']) # We need awk for the "check" target. The system "awk" is bad on # some platforms. 
AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AM_SET_LEADING_DOT])dnl _AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], [_AM_PROG_TAR([v7])])]) _AM_IF_OPTION([no-dependencies],, [AC_PROVIDE_IFELSE([AC_PROG_CC], [_AM_DEPENDENCIES([CC])], [m4_define([AC_PROG_CC], m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_CXX], [_AM_DEPENDENCIES([CXX])], [m4_define([AC_PROG_CXX], m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJC], [_AM_DEPENDENCIES([OBJC])], [m4_define([AC_PROG_OBJC], m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], [_AM_DEPENDENCIES([OBJCXX])], [m4_define([AC_PROG_OBJCXX], m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl ]) AC_REQUIRE([AM_SILENT_RULES])dnl dnl The testsuite driver may need to know about EXEEXT, so add the dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. AC_CONFIG_COMMANDS_PRE(dnl [m4_provide_if([_AM_COMPILER_EXEEXT], [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl ]) dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further dnl mangled by Autoconf and run in a shell conditional statement. m4_define([_AC_COMPILER_EXEEXT], m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) # When config.status generates a header, we must update the stamp-h file. # This file resides in the same directory as the config header # that is generated. The stamp files are numbered to have different names. # Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the # loop where config.status creates the headers, so we can generate # our stamp files there. AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], [# Compute $1's index in $config_headers. 
_am_arg=$1 _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) # Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_SH # ------------------ # Define $install_sh. AC_DEFUN([AM_PROG_INSTALL_SH], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl if test x"${install_sh}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi AC_SUBST([install_sh])]) # Copyright (C) 2003-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # Check whether the underlying file-system supports filenames # with a leading dot. For instance MS-DOS doesn't. AC_DEFUN([AM_SET_LEADING_DOT], [rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null AC_SUBST([am__leading_dot])]) # Add --enable-maintainer-mode option to configure. -*- Autoconf -*- # From Jim Meyering # Copyright (C) 1996-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MAINTAINER_MODE([DEFAULT-MODE]) # ---------------------------------- # Control maintainer-specific portions of Makefiles. # Default is to disable them, unless 'enable' is passed literally. # For symmetry, 'disable' may be passed as well. 
Anyway, the user # can override the default with the --enable/--disable switch. AC_DEFUN([AM_MAINTAINER_MODE], [m4_case(m4_default([$1], [disable]), [enable], [m4_define([am_maintainer_other], [disable])], [disable], [m4_define([am_maintainer_other], [enable])], [m4_define([am_maintainer_other], [enable]) m4_warn([syntax], [unexpected argument to AM@&t@_MAINTAINER_MODE: $1])]) AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles]) dnl maintainer-mode's default is 'disable' unless 'enable' is passed AC_ARG_ENABLE([maintainer-mode], [AS_HELP_STRING([--]am_maintainer_other[-maintainer-mode], am_maintainer_other[ make rules and dependencies not useful (and sometimes confusing) to the casual installer])], [USE_MAINTAINER_MODE=$enableval], [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes])) AC_MSG_RESULT([$USE_MAINTAINER_MODE]) AM_CONDITIONAL([MAINTAINER_MODE], [test $USE_MAINTAINER_MODE = yes]) MAINT=$MAINTAINER_MODE_TRUE AC_SUBST([MAINT])dnl ] ) # Check to see how 'make' treats includes. -*- Autoconf -*- # Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MAKE_INCLUDE() # ----------------- # Check to see how make treats includes. AC_DEFUN([AM_MAKE_INCLUDE], [am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just comment out the code. AC_MSG_CHECKING([for style of include used by $am_make]) am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from 'make'. case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. 
if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi AC_SUBST([am__include]) AC_SUBST([am__quote]) AC_MSG_RESULT([$_am_result]) rm -f confinc confmf ]) # Copyright (C) 1999-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_CC_C_O # -------------- # Like AC_PROG_CC_C_O, but changed for automake. AC_DEFUN([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC_C_O])dnl AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([compile])dnl # FIXME: we rely on the cache variable name because # there is no other way. set dummy $CC am_cc=`echo $[2] | sed ['s/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/']` eval am_t=\$ac_cv_prog_cc_${am_cc}_c_o if test "$am_t" != yes; then # Losing compiler, so override with the script. # FIXME: It is wrong to rewrite CC. # But if we don't then we get into trouble of one sort or another. # A longer-term fix would be to have automake use am__CC in this case, # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" CC="$am_aux_dir/compile $CC" fi dnl Make sure AC_PROG_CC is never called again, or it will override our dnl setting of CC. m4_define([AC_PROG_CC], [m4_fatal([AC_PROG_CC cannot be called after AM_PROG_CC_C_O])]) ]) # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- # Copyright (C) 1997-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# AM_MISSING_PROG(NAME, PROGRAM) # ------------------------------ AC_DEFUN([AM_MISSING_PROG], [AC_REQUIRE([AM_MISSING_HAS_RUN]) $1=${$1-"${am_missing_run}$2"} AC_SUBST($1)]) # AM_MISSING_HAS_RUN # ------------------ # Define MISSING if not defined so far and test if it is modern enough. # If it is, set am_missing_run to use it, otherwise, to nothing. AC_DEFUN([AM_MISSING_HAS_RUN], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([missing])dnl if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then am_missing_run="$MISSING " else am_missing_run= AC_MSG_WARN(['missing' script is too old or missing]) fi ]) # -*- Autoconf -*- # Obsolete and "removed" macros, that must however still report explicit # error messages when used, to smooth transition. # # Copyright (C) 1996-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. AC_DEFUN([AM_CONFIG_HEADER], [AC_DIAGNOSE([obsolete], ['$0': this macro is obsolete. You should use the 'AC][_CONFIG_HEADERS' macro instead.])dnl AC_CONFIG_HEADERS($@)]) AC_DEFUN([AM_PROG_CC_STDC], [AC_PROG_CC am_cv_prog_cc_stdc=$ac_cv_prog_cc_stdc AC_DIAGNOSE([obsolete], ['$0': this macro is obsolete. You should simply use the 'AC][_PROG_CC' macro instead. Also, your code should no longer depend upon 'am_cv_prog_cc_stdc', but upon 'ac_cv_prog_cc_stdc'.])]) AC_DEFUN([AM_C_PROTOTYPES], [AC_FATAL([automatic de-ANSI-fication support has been removed])]) AU_DEFUN([fp_C_PROTOTYPES], [AM_C_PROTOTYPES]) # Helper functions for option handling. -*- Autoconf -*- # Copyright (C) 2001-2013 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_MANGLE_OPTION(NAME) # ----------------------- AC_DEFUN([_AM_MANGLE_OPTION], [[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) # _AM_SET_OPTION(NAME) # -------------------- # Set option NAME. Presently that only means defining a flag for this option. AC_DEFUN([_AM_SET_OPTION], [m4_define(_AM_MANGLE_OPTION([$1]), [1])]) # _AM_SET_OPTIONS(OPTIONS) # ------------------------ # OPTIONS is a space-separated list of Automake options. AC_DEFUN([_AM_SET_OPTIONS], [m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) # _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) # ------------------------------------------- # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. AC_DEFUN([_AM_IF_OPTION], [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) # Copyright (C) 1999-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PATH_PYTHON([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) # --------------------------------------------------------------------------- # Adds support for distributing Python modules and packages. To # install modules, copy them to $(pythondir), using the python_PYTHON # automake variable. To install a package with the same name as the # automake package, install to $(pkgpythondir), or use the # pkgpython_PYTHON automake variable. # # The variables $(pyexecdir) and $(pkgpyexecdir) are provided as # locations to install python extension modules (shared libraries). # Another macro is required to find the appropriate flags to compile # extension modules. 
# # If your package is configured with a different prefix to python, # users will have to add the install directory to the PYTHONPATH # environment variable, or create a .pth file (see the python # documentation for details). # # If the MINIMUM-VERSION argument is passed, AM_PATH_PYTHON will # cause an error if the version of python installed on the system # doesn't meet the requirement. MINIMUM-VERSION should consist of # numbers and dots only. AC_DEFUN([AM_PATH_PYTHON], [ dnl Find a Python interpreter. Python versions prior to 2.0 are not dnl supported. (2.0 was released on October 16, 2000). m4_define_default([_AM_PYTHON_INTERPRETER_LIST], [python python2 python3 python3.3 python3.2 python3.1 python3.0 python2.7 dnl python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0]) AC_ARG_VAR([PYTHON], [the Python interpreter]) m4_if([$1],[],[ dnl No version check is needed. # Find any Python interpreter. if test -z "$PYTHON"; then AC_PATH_PROGS([PYTHON], _AM_PYTHON_INTERPRETER_LIST, :) fi am_display_PYTHON=python ], [ dnl A version check is needed. if test -n "$PYTHON"; then # If the user set $PYTHON, use it and don't search something else. AC_MSG_CHECKING([whether $PYTHON version is >= $1]) AM_PYTHON_CHECK_VERSION([$PYTHON], [$1], [AC_MSG_RESULT([yes])], [AC_MSG_RESULT([no]) AC_MSG_ERROR([Python interpreter is too old])]) am_display_PYTHON=$PYTHON else # Otherwise, try each interpreter until we find one that satisfies # VERSION. AC_CACHE_CHECK([for a Python interpreter with version >= $1], [am_cv_pathless_PYTHON],[ for am_cv_pathless_PYTHON in _AM_PYTHON_INTERPRETER_LIST none; do test "$am_cv_pathless_PYTHON" = none && break AM_PYTHON_CHECK_VERSION([$am_cv_pathless_PYTHON], [$1], [break]) done]) # Set $PYTHON to the absolute path of $am_cv_pathless_PYTHON. 
if test "$am_cv_pathless_PYTHON" = none; then PYTHON=: else AC_PATH_PROG([PYTHON], [$am_cv_pathless_PYTHON]) fi am_display_PYTHON=$am_cv_pathless_PYTHON fi ]) if test "$PYTHON" = :; then dnl Run any user-specified action, or abort. m4_default([$3], [AC_MSG_ERROR([no suitable Python interpreter found])]) else dnl Query Python for its version number. Getting [:3] seems to be dnl the best way to do this; it's what "site.py" does in the standard dnl library. AC_CACHE_CHECK([for $am_display_PYTHON version], [am_cv_python_version], [am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[[:3]])"`]) AC_SUBST([PYTHON_VERSION], [$am_cv_python_version]) dnl Use the values of $prefix and $exec_prefix for the corresponding dnl values of PYTHON_PREFIX and PYTHON_EXEC_PREFIX. These are made dnl distinct variables so they can be overridden if need be. However, dnl general consensus is that you shouldn't need this ability. AC_SUBST([PYTHON_PREFIX], ['${prefix}']) AC_SUBST([PYTHON_EXEC_PREFIX], ['${exec_prefix}']) dnl At times (like when building shared libraries) you may want dnl to know which OS platform Python thinks this is. AC_CACHE_CHECK([for $am_display_PYTHON platform], [am_cv_python_platform], [am_cv_python_platform=`$PYTHON -c "import sys; sys.stdout.write(sys.platform)"`]) AC_SUBST([PYTHON_PLATFORM], [$am_cv_python_platform]) # Just factor out some code duplication. am_python_setup_sysconfig="\ import sys # Prefer sysconfig over distutils.sysconfig, for better compatibility # with python 3.x. See automake bug#10227. try: import sysconfig except ImportError: can_use_sysconfig = 0 else: can_use_sysconfig = 1 # Can't use sysconfig in CPython 2.7, since it's broken in virtualenvs: # try: from platform import python_implementation if python_implementation() == 'CPython' and sys.version[[:3]] == '2.7': can_use_sysconfig = 0 except ImportError: pass" dnl Set up 4 directories: dnl pythondir -- where to install python scripts. 
This is the dnl site-packages directory, not the python standard library dnl directory like in previous automake betas. This behavior dnl is more consistent with lispdir.m4 for example. dnl Query distutils for this directory. AC_CACHE_CHECK([for $am_display_PYTHON script directory], [am_cv_python_pythondir], [if test "x$prefix" = xNONE then am_py_prefix=$ac_default_prefix else am_py_prefix=$prefix fi am_cv_python_pythondir=`$PYTHON -c " $am_python_setup_sysconfig if can_use_sysconfig: sitedir = sysconfig.get_path('purelib', vars={'base':'$am_py_prefix'}) else: from distutils import sysconfig sitedir = sysconfig.get_python_lib(0, 0, prefix='$am_py_prefix') sys.stdout.write(sitedir)"` case $am_cv_python_pythondir in $am_py_prefix*) am__strip_prefix=`echo "$am_py_prefix" | sed 's|.|.|g'` am_cv_python_pythondir=`echo "$am_cv_python_pythondir" | sed "s,^$am__strip_prefix,$PYTHON_PREFIX,"` ;; *) case $am_py_prefix in /usr|/System*) ;; *) am_cv_python_pythondir=$PYTHON_PREFIX/lib/python$PYTHON_VERSION/site-packages ;; esac ;; esac ]) AC_SUBST([pythondir], [$am_cv_python_pythondir]) dnl pkgpythondir -- $PACKAGE directory under pythondir. Was dnl PYTHON_SITE_PACKAGE in previous betas, but this naming is dnl more consistent with the rest of automake. AC_SUBST([pkgpythondir], [\${pythondir}/$PACKAGE]) dnl pyexecdir -- directory for installing python extension modules dnl (shared libraries) dnl Query distutils for this directory. 
AC_CACHE_CHECK([for $am_display_PYTHON extension module directory], [am_cv_python_pyexecdir], [if test "x$exec_prefix" = xNONE then am_py_exec_prefix=$am_py_prefix else am_py_exec_prefix=$exec_prefix fi am_cv_python_pyexecdir=`$PYTHON -c " $am_python_setup_sysconfig if can_use_sysconfig: sitedir = sysconfig.get_path('platlib', vars={'platbase':'$am_py_prefix'}) else: from distutils import sysconfig sitedir = sysconfig.get_python_lib(1, 0, prefix='$am_py_prefix') sys.stdout.write(sitedir)"` case $am_cv_python_pyexecdir in $am_py_exec_prefix*) am__strip_prefix=`echo "$am_py_exec_prefix" | sed 's|.|.|g'` am_cv_python_pyexecdir=`echo "$am_cv_python_pyexecdir" | sed "s,^$am__strip_prefix,$PYTHON_EXEC_PREFIX,"` ;; *) case $am_py_exec_prefix in /usr|/System*) ;; *) am_cv_python_pyexecdir=$PYTHON_EXEC_PREFIX/lib/python$PYTHON_VERSION/site-packages ;; esac ;; esac ]) AC_SUBST([pyexecdir], [$am_cv_python_pyexecdir]) dnl pkgpyexecdir -- $(pyexecdir)/$(PACKAGE) AC_SUBST([pkgpyexecdir], [\${pyexecdir}/$PACKAGE]) dnl Run any user-specified action. $2 fi ]) # AM_PYTHON_CHECK_VERSION(PROG, VERSION, [ACTION-IF-TRUE], [ACTION-IF-FALSE]) # --------------------------------------------------------------------------- # Run ACTION-IF-TRUE if the Python interpreter PROG has version >= VERSION. # Run ACTION-IF-FALSE otherwise. # This test uses sys.hexversion instead of the string equivalent (first # word of sys.version), in order to cope with versions such as 2.2c1. # This supports Python 2.0 or higher. (2.0 was released on October 16, 2000). AC_DEFUN([AM_PYTHON_CHECK_VERSION], [prog="import sys # split strings by '.' and convert to numeric. Append some zeros # because we need at least 4 digits for the hex conversion. 
# map returns an iterator in Python 3.0 and a list in 2.x minver = list(map(int, '$2'.split('.'))) + [[0, 0, 0]] minverhex = 0 # xrange is not present in Python 3.0 and range returns an iterator for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[[i]] sys.exit(sys.hexversion < minverhex)" AS_IF([AM_RUN_LOG([$1 -c "$prog"])], [$3], [$4])]) # Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_RUN_LOG(COMMAND) # ------------------- # Run COMMAND, save the exit status in ac_status, and log it. # (This has been adapted from Autoconf's _AC_RUN_LOG macro.) AC_DEFUN([AM_RUN_LOG], [{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD (exit $ac_status); }]) # Check to make sure that the build environment is sane. -*- Autoconf -*- # Copyright (C) 1996-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_SANITY_CHECK # --------------- AC_DEFUN([AM_SANITY_CHECK], [AC_MSG_CHECKING([whether build environment is sane]) # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[[\\\"\#\$\&\'\`$am_lf]]*) AC_MSG_ERROR([unsafe absolute working directory name]);; esac case $srcdir in *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);; esac # Do 'set' in a subshell so we don't clobber the current shell's # arguments. 
Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( am_has_slept=no for am_try in 1 2; do echo "timestamp, slept: $am_has_slept" > conftest.file set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$[*]" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi if test "$[*]" != "X $srcdir/configure conftest.file" \ && test "$[*]" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken alias in your environment]) fi if test "$[2]" = conftest.file || test $am_try -eq 2; then break fi # Just in case. sleep 1 am_has_slept=yes done test "$[2]" = conftest.file ) then # Ok. : else AC_MSG_ERROR([newly created file is older than distributed files! Check your system clock]) fi AC_MSG_RESULT([yes]) # If we didn't sleep, we still need to ensure time stamps of config.status and # generated files are strictly newer. am_sleep_pid= if grep 'slept: no' conftest.file >/dev/null 2>&1; then ( sleep 1 ) & am_sleep_pid=$! fi AC_CONFIG_COMMANDS_PRE( [AC_MSG_CHECKING([that generated files are newer than configure]) if test -n "$am_sleep_pid"; then # Hide warnings about reused PIDs. wait $am_sleep_pid 2>/dev/null fi AC_MSG_RESULT([done])]) rm -f conftest.file ]) # Copyright (C) 2009-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# AM_SILENT_RULES([DEFAULT]) # -------------------------- # Enable less verbose build rules; with the default set to DEFAULT # ("yes" being less verbose, "no" or empty being verbose). AC_DEFUN([AM_SILENT_RULES], [AC_ARG_ENABLE([silent-rules], [dnl AS_HELP_STRING( [--enable-silent-rules], [less verbose build output (undo: "make V=1")]) AS_HELP_STRING( [--disable-silent-rules], [verbose build output (undo: "make V=0")])dnl ]) case $enable_silent_rules in @%:@ ((( yes) AM_DEFAULT_VERBOSITY=0;; no) AM_DEFAULT_VERBOSITY=1;; *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; esac dnl dnl A few 'make' implementations (e.g., NonStop OS and NextStep) dnl do not support nested variable expansions. dnl See automake bug#9928 and bug#10237. am_make=${MAKE-make} AC_CACHE_CHECK([whether $am_make supports nested variables], [am_cv_make_support_nested_variables], [if AS_ECHO([['TRUE=$(BAR$(V)) BAR0=false BAR1=true V=1 am__doit: @$(TRUE) .PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then am_cv_make_support_nested_variables=yes else am_cv_make_support_nested_variables=no fi]) if test $am_cv_make_support_nested_variables = yes; then dnl Using '$V' instead of '$(V)' breaks IRIX make. AM_V='$(V)' AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' else AM_V=$AM_DEFAULT_VERBOSITY AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY fi AC_SUBST([AM_V])dnl AM_SUBST_NOTMAKE([AM_V])dnl AC_SUBST([AM_DEFAULT_V])dnl AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl AC_SUBST([AM_DEFAULT_VERBOSITY])dnl AM_BACKSLASH='\' AC_SUBST([AM_BACKSLASH])dnl _AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl ]) # Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_STRIP # --------------------- # One issue with vendor 'install' (even GNU) is that you can't # specify the program used to strip binaries. 
This is especially # annoying in cross-compiling environments, where the build's strip # is unlikely to handle the host's binaries. # Fortunately install-sh will honor a STRIPPROG variable, so we # always use install-sh in "make install-strip", and initialize # STRIPPROG with the value of the STRIP variable (set by the user). AC_DEFUN([AM_PROG_INSTALL_STRIP], [AC_REQUIRE([AM_PROG_INSTALL_SH])dnl # Installed binaries are usually stripped using 'strip' when the user # run "make install-strip". However 'strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the 'STRIP' environment variable to overrule this program. dnl Don't test for $cross_compiling = yes, because it might be 'maybe'. if test "$cross_compiling" != no; then AC_CHECK_TOOL([STRIP], [strip], :) fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" AC_SUBST([INSTALL_STRIP_PROGRAM])]) # Copyright (C) 2006-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- # Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. # This macro is traced by Automake. AC_DEFUN([_AM_SUBST_NOTMAKE]) # AM_SUBST_NOTMAKE(VARIABLE) # -------------------------- # Public sister of _AM_SUBST_NOTMAKE. AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) # Check how to create a tarball. -*- Autoconf -*- # Copyright (C) 2004-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_PROG_TAR(FORMAT) # -------------------- # Check how to create a tarball in format FORMAT. # FORMAT should be one of 'v7', 'ustar', or 'pax'. 
# # Substitute a variable $(am__tar) that is a command # writing to stdout a FORMAT-tarball containing the directory # $tardir. # tardir=directory && $(am__tar) > result.tar # # Substitute a variable $(am__untar) that extract such # a tarball read from stdin. # $(am__untar) < result.tar # AC_DEFUN([_AM_PROG_TAR], [# Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... AC_SUBST([AMTAR], ['$${TAR-tar}']) # We'll loop over all known methods to create a tar archive until one works. _am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' m4_if([$1], [v7], [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], [m4_case([$1], [ustar], [# The POSIX 1988 'ustar' format is defined with fixed-size fields. # There is notably a 21 bits limit for the UID and the GID. In fact, # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343 # and bug#13588). am_max_uid=2097151 # 2^21 - 1 am_max_gid=$am_max_uid # The $UID and $GID variables are not portable, so we need to resort # to the POSIX-mandated id(1) utility. Errors in the 'id' calls # below are definitely unexpected, so allow the users to see them # (that is, avoid stderr redirection). am_uid=`id -u || echo unknown` am_gid=`id -g || echo unknown` AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format]) if test $am_uid -le $am_max_uid; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) _am_tools=none fi AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format]) if test $am_gid -le $am_max_gid; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) _am_tools=none fi], [pax], [], [m4_fatal([Unknown tar format])]) AC_MSG_CHECKING([how to create a $1 tar archive]) # Go ahead even if we have the value already cached. We do so because we # need to set the values for the 'am__tar' and 'am__untar' variables. 
_am_tools=${am_cv_prog_tar_$1-$_am_tools} for _am_tool in $_am_tools; do case $_am_tool in gnutar) for _am_tar in tar gnutar gtar; do AM_RUN_LOG([$_am_tar --version]) && break done am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' am__untar="$_am_tar -xf -" ;; plaintar) # Must skip GNU tar: if it does not support --format= it doesn't create # ustar tarball either. (tar --version) >/dev/null 2>&1 && continue am__tar='tar chf - "$$tardir"' am__tar_='tar chf - "$tardir"' am__untar='tar xf -' ;; pax) am__tar='pax -L -x $1 -w "$$tardir"' am__tar_='pax -L -x $1 -w "$tardir"' am__untar='pax -r' ;; cpio) am__tar='find "$$tardir" -print | cpio -o -H $1 -L' am__tar_='find "$tardir" -print | cpio -o -H $1 -L' am__untar='cpio -i -H $1 -d' ;; none) am__tar=false am__tar_=false am__untar=false ;; esac # If the value was cached, stop now. We just wanted to have am__tar # and am__untar set. test -n "${am_cv_prog_tar_$1}" && break # tar/untar a dummy directory, and stop if the command works. rm -rf conftest.dir mkdir conftest.dir echo GrepMe > conftest.dir/file AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) rm -rf conftest.dir if test -s conftest.tar; then AM_RUN_LOG([$am__untar /dev/null 2>&1 && break fi done rm -rf conftest.dir AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) AC_MSG_RESULT([$am_cv_prog_tar_$1])]) AC_SUBST([am__tar]) AC_SUBST([am__untar]) ]) # _AM_PROG_TAR m4_include([m4/ax_python_module.m4]) m4_include([m4/libtool.m4]) m4_include([m4/ltoptions.m4]) m4_include([m4/ltsugar.m4]) m4_include([m4/ltversion.m4]) m4_include([m4/lt~obsolete.m4]) libstoragemgmt-1.2.3/INSTALL0000644000175000017500000003660512540163524012523 00000000000000Installation Instructions ************************* Copyright (C) 1994-1996, 1999-2002, 2004-2013 Free Software Foundation, Inc. 
Copying and distribution of this file, with or without modification, are permitted in any medium without royalty provided the copyright notice and this notice are preserved. This file is offered as-is, without warranty of any kind. Basic Installation ================== Briefly, the shell commands `./configure; make; make install' should configure, build, and install this package. The following more-detailed instructions are generic; see the `README' file for instructions specific to this package. Some packages provide this `INSTALL' file but do not implement all of the features documented below. The lack of an optional feature in a given package is not necessarily a bug. More recommendations for GNU packages can be found in *note Makefile Conventions: (standards)Makefile Conventions. The `configure' shell script attempts to guess correct values for various system-dependent variables used during compilation. It uses those values to create a `Makefile' in each directory of the package. It may also create one or more `.h' files containing system-dependent definitions. Finally, it creates a shell script `config.status' that you can run in the future to recreate the current configuration, and a file `config.log' containing compiler output (useful mainly for debugging `configure'). It can also use an optional file (typically called `config.cache' and enabled with `--cache-file=config.cache' or simply `-C') that saves the results of its tests to speed up reconfiguring. Caching is disabled by default to prevent problems with accidental use of stale cache files. If you need to do unusual things to compile the package, please try to figure out how `configure' could check whether to do them, and mail diffs or instructions to the address given in the `README' so they can be considered for the next release. If you are using the cache, and at some point `config.cache' contains results you don't want to keep, you may remove or edit it. 
The file `configure.ac' (or `configure.in') is used to create `configure' by a program called `autoconf'. You need `configure.ac' if you want to change it or regenerate `configure' using a newer version of `autoconf'. The simplest way to compile this package is: 1. `cd' to the directory containing the package's source code and type `./configure' to configure the package for your system. Running `configure' might take a while. While running, it prints some messages telling which features it is checking for. 2. Type `make' to compile the package. 3. Optionally, type `make check' to run any self-tests that come with the package, generally using the just-built uninstalled binaries. 4. Type `make install' to install the programs and any data files and documentation. When installing into a prefix owned by root, it is recommended that the package be configured and built as a regular user, and only the `make install' phase executed with root privileges. 5. Optionally, type `make installcheck' to repeat any self-tests, but this time using the binaries in their final installed location. This target does not install anything. Running this target as a regular user, particularly if the prior `make install' required root privileges, verifies that the installation completed correctly. 6. You can remove the program binaries and object files from the source code directory by typing `make clean'. To also remove the files that `configure' created (so you can compile the package for a different kind of computer), type `make distclean'. There is also a `make maintainer-clean' target, but that is intended mainly for the package's developers. If you use it, you may have to get all sorts of other programs in order to regenerate files that came with the distribution. 7. Often, you can also type `make uninstall' to remove the installed files again. In practice, not all packages have tested that uninstallation works correctly, even though it is required by the GNU Coding Standards. 8. 
Some packages, particularly those that use Automake, provide `make distcheck', which can by used by developers to test that all other targets like `make install' and `make uninstall' work correctly. This target is generally not run by end users. Compilers and Options ===================== Some systems require unusual options for compilation or linking that the `configure' script does not know about. Run `./configure --help' for details on some of the pertinent environment variables. You can give `configure' initial values for configuration parameters by setting variables in the command line or in the environment. Here is an example: ./configure CC=c99 CFLAGS=-g LIBS=-lposix *Note Defining Variables::, for more details. Compiling For Multiple Architectures ==================================== You can compile the package for more than one kind of computer at the same time, by placing the object files for each architecture in their own directory. To do this, you can use GNU `make'. `cd' to the directory where you want the object files and executables to go and run the `configure' script. `configure' automatically checks for the source code in the directory that `configure' is in and in `..'. This is known as a "VPATH" build. With a non-GNU `make', it is safer to compile the package for one architecture at a time in the source code directory. After you have installed the package for one architecture, use `make distclean' before reconfiguring for another architecture. On MacOS X 10.5 and later systems, you can create libraries and executables that work on multiple system types--known as "fat" or "universal" binaries--by specifying multiple `-arch' options to the compiler but only a single `-arch' option to the preprocessor. 
Like this: ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ CPP="gcc -E" CXXCPP="g++ -E" This is not guaranteed to produce working output in all cases, you may have to build one architecture at a time and combine the results using the `lipo' tool if you have problems. Installation Names ================== By default, `make install' installs the package's commands under `/usr/local/bin', include files under `/usr/local/include', etc. You can specify an installation prefix other than `/usr/local' by giving `configure' the option `--prefix=PREFIX', where PREFIX must be an absolute file name. You can specify separate installation prefixes for architecture-specific files and architecture-independent files. If you pass the option `--exec-prefix=PREFIX' to `configure', the package uses PREFIX as the prefix for installing programs and libraries. Documentation and other data files still use the regular prefix. In addition, if you use an unusual directory layout you can give options like `--bindir=DIR' to specify different values for particular kinds of files. Run `configure --help' for a list of the directories you can set and what kinds of files go in them. In general, the default for these options is expressed in terms of `${prefix}', so that specifying just `--prefix' will affect all of the other directory specifications that were not explicitly provided. The most portable way to affect installation locations is to pass the correct locations to `configure'; however, many packages provide one or both of the following shortcuts of passing variable assignments to the `make install' command line to change installation locations without having to reconfigure or recompile. The first method involves providing an override variable for each affected directory. 
For example, `make install prefix=/alternate/directory' will choose an alternate location for all directory configuration variables that were expressed in terms of `${prefix}'. Any directories that were specified during `configure', but not in terms of `${prefix}', must each be overridden at install time for the entire installation to be relocated. The approach of makefile variable overrides for each directory variable is required by the GNU Coding Standards, and ideally causes no recompilation. However, some platforms have known limitations with the semantics of shared libraries that end up requiring recompilation when using this method, particularly noticeable in packages that use GNU Libtool. The second method involves providing the `DESTDIR' variable. For example, `make install DESTDIR=/alternate/directory' will prepend `/alternate/directory' before all installation names. The approach of `DESTDIR' overrides is not required by the GNU Coding Standards, and does not work on platforms that have drive letters. On the other hand, it does better at avoiding recompilation issues, and works well even when some directory options were not specified in terms of `${prefix}' at `configure' time. Optional Features ================= If the package supports it, you can cause programs to be installed with an extra prefix or suffix on their names by giving `configure' the option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. Some packages pay attention to `--enable-FEATURE' options to `configure', where FEATURE indicates an optional part of the package. They may also pay attention to `--with-PACKAGE' options, where PACKAGE is something like `gnu-as' or `x' (for the X Window System). The `README' should mention any `--enable-' and `--with-' options that the package recognizes. 
For packages that use the X Window System, `configure' can usually find the X include and library files automatically, but if it doesn't, you can use the `configure' options `--x-includes=DIR' and `--x-libraries=DIR' to specify their locations. Some packages offer the ability to configure how verbose the execution of `make' will be. For these packages, running `./configure --enable-silent-rules' sets the default to minimal output, which can be overridden with `make V=1'; while running `./configure --disable-silent-rules' sets the default to verbose, which can be overridden with `make V=0'. Particular systems ================== On HP-UX, the default C compiler is not ANSI C compatible. If GNU CC is not installed, it is recommended to use the following options in order to use an ANSI C compiler: ./configure CC="cc -Ae -D_XOPEN_SOURCE=500" and if that doesn't work, install pre-built binaries of GCC for HP-UX. HP-UX `make' updates targets which have the same time stamps as their prerequisites, which makes it generally unusable when shipped generated files such as `configure' are involved. Use GNU `make' instead. On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot parse its `' header file. The option `-nodtk' can be used as a workaround. If GNU CC is not installed, it is therefore recommended to try ./configure CC="cc" and if that doesn't work, try ./configure CC="cc -nodtk" On Solaris, don't put `/usr/ucb' early in your `PATH'. This directory contains several dysfunctional programs; working variants of these programs are available in `/usr/bin'. So, if you need `/usr/ucb' in your `PATH', put it _after_ `/usr/bin'. On Haiku, software installed for all users goes in `/boot/common', not `/usr/local'. 
It is recommended to use the following options: ./configure --prefix=/boot/common Specifying the System Type ========================== There may be some features `configure' cannot figure out automatically, but needs to determine by the type of machine the package will run on. Usually, assuming the package is built to be run on the _same_ architectures, `configure' can figure that out, but if it prints a message saying it cannot guess the machine type, give it the `--build=TYPE' option. TYPE can either be a short name for the system type, such as `sun4', or a canonical name which has the form: CPU-COMPANY-SYSTEM where SYSTEM can have one of these forms: OS KERNEL-OS See the file `config.sub' for the possible values of each field. If `config.sub' isn't included in this package, then this package doesn't need to know the machine type. If you are _building_ compiler tools for cross-compiling, you should use the option `--target=TYPE' to select the type of system they will produce code for. If you want to _use_ a cross compiler, that generates code for a platform different from the build platform, you should specify the "host" platform (i.e., that on which the generated programs will eventually be run) with `--host=TYPE'. Sharing Defaults ================ If you want to set default values for `configure' scripts to share, you can create a site shell script called `config.site' that gives default values for variables like `CC', `cache_file', and `prefix'. `configure' looks for `PREFIX/share/config.site' if it exists, then `PREFIX/etc/config.site' if it exists. Or, you can set the `CONFIG_SITE' environment variable to the location of the site script. A warning: not all `configure' scripts look for a site script. Defining Variables ================== Variables not defined in a site shell script can be set in the environment passed to `configure'. However, some packages may run configure again during the build, and the customized values of these variables may be lost. 
In order to avoid this problem, you should set them in the `configure' command line, using `VAR=value'. For example: ./configure CC=/usr/local2/bin/gcc causes the specified `gcc' to be used as the C compiler (unless it is overridden in the site shell script). Unfortunately, this technique does not work for `CONFIG_SHELL' due to an Autoconf limitation. Until the limitation is lifted, you can use this workaround: CONFIG_SHELL=/bin/bash ./configure CONFIG_SHELL=/bin/bash `configure' Invocation ====================== `configure' recognizes the following options to control how it operates. `--help' `-h' Print a summary of all of the options to `configure', and exit. `--help=short' `--help=recursive' Print a summary of the options unique to this package's `configure', and exit. The `short' variant lists options used only in the top level, while the `recursive' variant lists options also present in any nested packages. `--version' `-V' Print the version of Autoconf used to generate the `configure' script, and exit. `--cache-file=FILE' Enable the cache: use and save the results of the tests in FILE, traditionally `config.cache'. FILE defaults to `/dev/null' to disable caching. `--config-cache' `-C' Alias for `--cache-file=config.cache'. `--quiet' `--silent' `-q' Do not print messages saying which checks are being made. To suppress all normal output, redirect it to `/dev/null' (any error messages will still be shown). `--srcdir=DIR' Look for the package's source code in directory DIR. Usually `configure' can determine that directory automatically. `--prefix=DIR' Use DIR as the installation prefix. *note Installation Names:: for more details, including other options available for fine-tuning the installation locations. `--no-create' `-n' Run the configure checks, but stop before creating any output files. `configure' also accepts some other, not widely useful, options. Run `configure --help' for more details. 
libstoragemgmt-1.2.3/config.h.in0000664000175000017500000001140212542455452013511 00000000000000/* config.h.in. Generated from configure.ac by autoheader. */ /* Define to 1 if you have the header file. */ #undef HAVE_DLFCN_H /* Define to 1 if you have the `getpass' function. */ #undef HAVE_GETPASS /* Define to 1 if you have the header file. */ #undef HAVE_INTTYPES_H /* Define to 1 if your system has a GNU libc compatible `malloc' function, and to 0 otherwise. */ #undef HAVE_MALLOC /* Define to 1 if you have the header file. */ #undef HAVE_MEMORY_H /* Define to 1 if you have the `memset' function. */ #undef HAVE_MEMSET /* Define to 1 if your system has a GNU libc compatible `realloc' function, and to 0 otherwise. */ #undef HAVE_REALLOC /* Define to 1 if you have the `socket' function. */ #undef HAVE_SOCKET /* Define to 1 if stdbool.h conforms to C99. */ #undef HAVE_STDBOOL_H /* Define to 1 if you have the header file. */ #undef HAVE_STDINT_H /* Define to 1 if you have the header file. */ #undef HAVE_STDLIB_H /* Define to 1 if you have the `strchr' function. */ #undef HAVE_STRCHR /* Define to 1 if you have the `strdup' function. */ #undef HAVE_STRDUP /* Define to 1 if you have the header file. */ #undef HAVE_STRINGS_H /* Define to 1 if you have the header file. */ #undef HAVE_STRING_H /* Define to 1 if you have the `strtol' function. */ #undef HAVE_STRTOL /* Define to 1 if you have the `strtoul' function. */ #undef HAVE_STRTOUL /* Define to 1 if you have the header file. */ #undef HAVE_SYSLOG_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_SOCKET_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_STAT_H /* Define to 1 if you have the header file. */ #undef HAVE_SYS_TYPES_H /* Define to 1 if you have the header file. */ #undef HAVE_UNISTD_H /* Define to 1 if you have the header file. */ #undef HAVE_YAJL_YAJL_GEN_H /* Define to 1 if you have the header file. */ #undef HAVE_YAJL_YAJL_PARSE_H /* Define to 1 if you have the header file. 
*/ #undef HAVE_YAJL_YAJL_VERSION_H /* Define to 1 if the system has the type `_Bool'. */ #undef HAVE__BOOL /* Define to the sub-directory in which libtool stores uninstalled libraries. */ #undef LT_OBJDIR /* Define to 1 if your C compiler doesn't accept -c and -o together. */ #undef NO_MINUS_C_MINUS_O /* Name of package */ #undef PACKAGE /* Define to the address where bug reports for this package should be sent. */ #undef PACKAGE_BUGREPORT /* Define to the full name of this package. */ #undef PACKAGE_NAME /* Define to the full name and version of this package. */ #undef PACKAGE_STRING /* Define to the one symbol short name of this package. */ #undef PACKAGE_TARNAME /* Define to the home page for this package. */ #undef PACKAGE_URL /* Define to the version of this package. */ #undef PACKAGE_VERSION /* Define to 1 if you have the ANSI C header files. */ #undef STDC_HEADERS /* Version number of package */ #undef VERSION /* Define for Solaris 2.5.1 so the uint32_t typedef from , , or is not used. If the typedef were allowed, the #define below would cause a syntax error. */ #undef _UINT32_T /* Define for Solaris 2.5.1 so the uint64_t typedef from , , or is not used. If the typedef were allowed, the #define below would cause a syntax error. */ #undef _UINT64_T /* Define for Solaris 2.5.1 so the uint8_t typedef from , , or is not used. If the typedef were allowed, the #define below would cause a syntax error. */ #undef _UINT8_T /* Define to the type of a signed integer type of width exactly 32 bits if such a type exists and the standard includes do not define it. */ #undef int32_t /* Define to the type of a signed integer type of width exactly 64 bits if such a type exists and the standard includes do not define it. */ #undef int64_t /* Define to rpl_malloc if the replacement function should be used. */ #undef malloc /* Define to rpl_realloc if the replacement function should be used. */ #undef realloc /* Define to `unsigned int' if does not define. 
*/ #undef size_t /* Define to `int' if does not define. */ #undef ssize_t /* Define to the type of an unsigned integer type of width exactly 32 bits if such a type exists and the standard includes do not define it. */ #undef uint32_t /* Define to the type of an unsigned integer type of width exactly 64 bits if such a type exists and the standard includes do not define it. */ #undef uint64_t /* Define to the type of an unsigned integer type of width exactly 8 bits if such a type exists and the standard includes do not define it. */ #undef uint8_t libstoragemgmt-1.2.3/daemon/0000775000175000017500000000000012542455463013015 500000000000000libstoragemgmt-1.2.3/daemon/lsm_daemon.c0000664000175000017500000006202112537737032015217 00000000000000/* * Copyright (C) 2011-2015 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . 
* * Author: tasleson */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define BASE_DIR "/var/run/lsm" #define SOCKET_DIR BASE_DIR"/ipc" #define PLUGIN_DIR "/usr/bin" #define LSM_USER "libstoragemgmt" #define LSM_CONF_DIR "/etc/lsm/" #define LSM_PLUGIN_CONF_DIR_NAME "pluginconf.d" #define LSMD_CONF_FILE "lsmd.conf" #define LSM_CONF_ALLOW_ROOT_OPT_NAME "allow-plugin-root-privilege" #define LSM_CONF_REQUIRE_ROOT_OPT_NAME "require-root-privilege" #define min(a,b) \ ({ __typeof__ (a) _a = (a); \ __typeof__ (b) _b = (b); \ _a < _b ? _a : _b; }) #define max(a,b) \ ({ __typeof__ (a) _a = (a); \ __typeof__ (b) _b = (b); \ _a > _b ? _a : _b; }) int verbose_flag = 0; int systemd = 0; char *socket_dir = SOCKET_DIR; char *plugin_dir = PLUGIN_DIR; char *conf_dir = LSM_CONF_DIR; char plugin_extension[] = "_lsmplugin"; char plugin_conf_extension[] = ".conf"; typedef enum { RUNNING, RESTART, EXIT } serve_type; serve_type serve_state = RUNNING; int plugin_mem_debug = 0; int allow_root_plugin = 0; int has_root_plugin = 0; /** * Each item in plugin list contains this information */ struct plugin { char *file_path; int require_root; int fd; LIST_ENTRY(plugin) pointers; }; /** * Linked list of plug-ins */ LIST_HEAD(plugin_list, plugin) head; /** * Logs messages to the appropriate place * @param severity Severity of message, LOG_ERR causes daemon to exit * @param fmt String with format * @param ... Format parameters */ void logger(int severity, const char *fmt, ...) 
{ char buf[2048]; if (verbose_flag || LOG_WARNING == severity || LOG_ERR == severity) { va_list arg; va_start(arg, fmt); vsnprintf(buf, sizeof(buf), fmt, arg); va_end(arg); if (!systemd) { if (verbose_flag) { syslog(LOG_ERR, "%s", buf); } else { syslog(severity, "%s", buf); } } else { fprintf(stdout, "%s", buf); fflush(stdout); } if (LOG_ERR == severity) { exit(1); } } } #define log_and_exit(fmt, ...) logger(LOG_ERR, fmt, ##__VA_ARGS__) #define warn(fmt, ...) logger(LOG_WARNING, fmt, ##__VA_ARGS__) #define info(fmt, ...) logger(LOG_INFO, fmt, ##__VA_ARGS__) /** * Our signal handler. * @param s Received signal */ void signal_handler(int s) { if (SIGTERM == s) { serve_state = EXIT; } else if (SIGHUP == s) { serve_state = RESTART; } } /** * Installs our signal handler */ void install_sh(void) { if (signal(SIGTERM, signal_handler) == SIG_ERR) { log_and_exit("Can't catch signal SIGTERM\n"); } if (signal(SIGHUP, signal_handler) == SIG_ERR) { log_and_exit("Can't catch signal SIGHUP\n"); } } /** * If we are running as root, we will try to drop our privs. to our default * user. 
*/ void drop_privileges(void) { int err = 0; struct passwd *pw = NULL; pw = getpwnam(LSM_USER); if (pw) { if (!geteuid()) { if (-1 == setgid(pw->pw_gid)) { err = errno; log_and_exit("Unexpected error on setgid(errno %d)\n", err); } if (-1 == setgroups(1, &pw->pw_gid)) { err = errno; log_and_exit("Unexpected error on setgroups(errno %d)\n", err); } if (-1 == setuid(pw->pw_uid)) { err = errno; log_and_exit("Unexpected error on setuid(errno %d)\n", err); } } else if (pw->pw_uid != getuid()) { warn("Daemon not running as correct user\n"); } } else { info("Warn: Missing %s user, running as existing user!\n", LSM_USER); } } /** * Check to make sure we have access to the directories of interest */ void flight_check(void) { int err = 0; if (-1 == access(socket_dir, R_OK | W_OK)) { err = errno; log_and_exit("Unable to access socket directory %s, errno= %d\n", socket_dir, err); } if (-1 == access(plugin_dir, R_OK | X_OK)) { err = errno; log_and_exit("Unable to access plug-in directory %s, errno= %d\n", plugin_dir, err); } } /** * Print help. */ void usage(void) { printf("libStorageMgmt plug-in daemon.\n"); printf("lsmd [--plugindir ] [--socketdir ] [-v] [-d]\n"); printf(" --plugindir = The directory where the plugins are located\n"); printf(" --socketdir = The directory where the Unix domain sockets will " "be created\n"); printf(" --confdir = The directory where the config files are " "located\n"); printf(" -v = Verbose logging\n"); printf(" -d = new style daemon (systemd)\n"); } /** * Concatenates a path and a file name. 
* @param path Fully qualified path * @param name File name * @return Concatenated string, caller must call free when done */ char *path_form(const char *path, const char *name) { size_t s = strlen(path) + strlen(name) + 2; char *full = calloc(1, s); if (full) { snprintf(full, s, "%s/%s", path, name); } else { log_and_exit("malloc failure while trying to allocate %d bytes\n", s); } return full; } /* Call back signature */ typedef int (*file_op) (void *p, char *full_file_path); /** * For a given directory iterate through each directory item and exec the * callback, recursively process nested directories too. * @param dir Directory to transverse * @param p Pointer to user data (Optional) * @param call_back Function to call against file * @return */ void process_directory(char *dir, void *p, file_op call_back) { int err = 0; if (call_back && dir && strlen(dir)) { DIR *dp = NULL; struct dirent *entry = NULL; char *full_name = NULL; dp = opendir(dir); if (dp) { while ((entry = readdir(dp)) != NULL) { struct stat entry_st; free(full_name); full_name = path_form(dir, entry->d_name); if (lstat(full_name, &entry_st) != 0) { continue; } if (S_ISDIR(entry_st.st_mode)) { if (strncmp(entry->d_name, ".", 1) == 0) { continue; } process_directory(full_name, p, call_back); } else { if (call_back(p, full_name)) { break; } } } free(full_name); if (closedir(dp)) { err = errno; log_and_exit("Error on closing dir %s: %s\n", dir, strerror(err)); } } else { err = errno; log_and_exit("Error on processing directory %s: %s\n", dir, strerror(err)); } } } /** * Callback to remove a unix domain socket by deleting it. * @param p Call back data * @param full_name Full path an and file name * @return 0 to continue processing, anything else to stop. 
*/ int delete_socket(void *p, char *full_name) { struct stat statbuf; int err; assert(p == NULL); if (!lstat(full_name, &statbuf)) { if (S_ISSOCK(statbuf.st_mode)) { if (unlink(full_name)) { err = errno; log_and_exit("Error on unlinking file %s: %s\n", full_name, strerror(err)); } } } return 0; } /** * Walk the IPC socket directory and remove the socket files. */ void clean_sockets() { process_directory(socket_dir, NULL, delete_socket); } /** * Given a fully qualified path and name to a plug-in, create the IPC socket. * @param full_name Full name and path for plug-in * @return listening socket descriptor for IPC */ int setup_socket(char *full_name) { int err = 0; char name[128]; /* Strip off _lsmplugin from the file name, not sure * why I chose to do this */ memset(name, 0, sizeof(name)); char *base_nm = basename(full_name); strncpy(name, base_nm, min(abs(strlen(base_nm) - strlen(plugin_extension)), (sizeof(name) - 1))); char *socket_file = path_form(socket_dir, name); delete_socket(NULL, socket_file); int fd = socket(AF_UNIX, SOCK_STREAM, 0); if (-1 != fd) { struct sockaddr_un addr; memset(&addr, 0, sizeof(addr)); addr.sun_family = AF_UNIX; strncpy(addr.sun_path, socket_file, sizeof(addr.sun_path) - 1); if (-1 == bind(fd, (struct sockaddr *) &addr, sizeof(struct sockaddr_un))) { err = errno; log_and_exit("Error on binding socket %s: %s\n", socket_file, strerror(err)); } if (-1 == chmod(socket_file, S_IREAD | S_IWRITE | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH)) { err = errno; log_and_exit("Error on chmod socket file %s: %s\n", socket_file, strerror(err)); } if (-1 == listen(fd, 5)) { err = errno; log_and_exit("Error on listening %s: %s\n", socket_file, strerror(err)); } } else { err = errno; log_and_exit("Error on socket create %s: %s\n", socket_file, strerror(err)); } free(socket_file); return fd; } /** * Closes all the listening sockets and re-claims memory in linked list. 
* @param list */ void empty_plugin_list(struct plugin_list *list) { int err; struct plugin *item = NULL; while (!LIST_EMPTY(list)) { item = LIST_FIRST(list); LIST_REMOVE(item, pointers); if (-1 == close(item->fd)) { err = errno; info("Error on closing fd %d for file %s: %s\n", item->fd, item->file_path, strerror(err)); } free(item->file_path); item->file_path = NULL; item->fd = INT_MAX; free(item); } } /** * Parse config and seeking provided key name bool * 1. Keep value untouched if file not exist * 2. If file is not readable, abort via log_and_exit() * 3. Keep value untouched if provided key not found * 4. Abort via log_and_exit() if no enough memory. * @param conf_path config file path * @param key_name string, searching key * @param value int, output, value of this config key */ void parse_conf_bool(char *conf_path, char *key_name, int *value) { if (access(conf_path, F_OK) == -1) { /* file not exist. */ return; } config_t *cfg = (config_t *) malloc(sizeof(config_t)); if (cfg) { config_init(cfg); if (CONFIG_TRUE == config_read_file(cfg, conf_path)) { config_lookup_bool(cfg, key_name, value); } else { log_and_exit("configure %s parsing failed: %s at line %d\n", conf_path, config_error_text(cfg), config_error_line(cfg)); } } else { log_and_exit ("malloc failure while trying to allocate memory for config_t\n"); } config_destroy(cfg); free(cfg); } /** * Load plugin config for root privilege setting. * If config not found, return 0 for no root privilege required. * @param plugin_path Full path of plugin * @return 1 for require root privilege, 0 or not. 
*/
int chk_pconf_root_pri(char *plugin_path)
{
    int require_root = 0;
    char *base_name = basename(plugin_path);
    /* Length of the plug-in name once the "_lsmplugin" suffix is removed. */
    ssize_t plugin_name_len = strlen(base_name) - strlen(plugin_extension);

    if (plugin_name_len <= 0) {
        log_and_exit("Got invalid plugin full path %s\n", plugin_path);
    }

    ssize_t conf_file_name_len =
        plugin_name_len + strlen(plugin_conf_extension) + 1;
    char *plugin_conf_filename = (char *) malloc(conf_file_name_len);

    if (plugin_conf_filename) {
        /* Build "<name>.conf" from "<name>_lsmplugin". */
        strncpy(plugin_conf_filename, base_name, plugin_name_len);
        strncpy(plugin_conf_filename + plugin_name_len,
                plugin_conf_extension, strlen(plugin_conf_extension));
        plugin_conf_filename[conf_file_name_len - 1] = '\0';

        char *plugin_conf_dir_path =
            path_form(conf_dir, LSM_PLUGIN_CONF_DIR_NAME);
        char *plugin_conf_path =
            path_form(plugin_conf_dir_path, plugin_conf_filename);

        parse_conf_bool(plugin_conf_path, LSM_CONF_REQUIRE_ROOT_OPT_NAME,
                        &require_root);

        if (require_root == 1 && allow_root_plugin == 0) {
            warn("Plugin %s require root privilege while %s disable "
                 "globally\n", base_name, LSMD_CONF_FILE);
        }

        free(plugin_conf_dir_path);
        free(plugin_conf_filename);
        free(plugin_conf_path);
    } else {
        /* Fix: %zd for ssize_t — the original %d is the wrong conversion
         * for a 64-bit signed size on LP64 (undefined behavior). */
        log_and_exit("malloc failure while trying to allocate %zd "
                     "bytes\n", conf_file_name_len);
    }
    return require_root;
}

/**
 * Call back for plug-in processing.
* @param p Private data * @param full_name Full path and file name * @return 0 to continue, else abort directory processing */ int process_plugin(void *p, char *full_name) { if (full_name) { size_t ext_len = strlen(plugin_extension); size_t full_len = strlen(full_name); if (full_len > ext_len) { if (strncmp (full_name + full_len - ext_len, plugin_extension, ext_len) == 0) { struct plugin *item = calloc(1, sizeof(struct plugin)); if (item) { item->file_path = strdup(full_name); item->fd = setup_socket(full_name); item->require_root = chk_pconf_root_pri(full_name); has_root_plugin |= item->require_root; if (item->file_path && item->fd >= 0) { LIST_INSERT_HEAD((struct plugin_list *) p, item, pointers); info("Plugin %s added\n", full_name); } else { /* The only real way to get here is failed strdup as setup_socket will exit on error. */ free(item); item = NULL; log_and_exit("strdup failed %s\n", full_name); } } else { log_and_exit("Memory allocation failure!\n"); } } } } return 0; } /** * Cleans up any children that have exited. */ void child_cleanup(void) { int rc; int err; do { siginfo_t si; memset(&si, 0, sizeof(siginfo_t)); rc = waitid(P_ALL, 0, &si, WNOHANG | WEXITED); if (-1 == rc) { err = errno; if (err != ECHILD) { info("waitid %d - %s\n", err, strerror(err)); } break; } else { if (0 == rc && si.si_pid == 0) { break; } else { if (si.si_code == CLD_EXITED && si.si_status != 0) { info("Plug-in process %d exited with %d\n", si.si_pid, si.si_status); } } } } while (1); } /** * Closes and frees memory and removes Unix domain sockets. */ void clean_up(void) { empty_plugin_list(&head); clean_sockets(); } /** * Walks the plugin directory creating IPC sockets for each one. 
* @return */ int process_plugins(void) { clean_up(); info("Scanning plug-in directory %s\n", plugin_dir); process_directory(plugin_dir, &head, process_plugin); if (allow_root_plugin == 1 && has_root_plugin == 0) { info("No plugin requires root privilege, dropping root privilege\n"); flight_check(); drop_privileges(); } return 0; } /** * Given a socket descriptor looks it up and returns the plug-in * @param fd Socket descriptor to lookup * @return struct plugin */ struct plugin *plugin_lookup(int fd) { struct plugin *plug = NULL; LIST_FOREACH(plug, &head, pointers) { if (plug->fd == fd) { return plug; } } return NULL; } /** * Does the actual fork and exec of the plug-in * @param plugin Full filename and path of plug-in to exec. * @param client_fd Client connected file descriptor * @param require_root int, indicate whether this plugin require root * privilege or not */ void exec_plugin(char *plugin, int client_fd, int require_root) { int err = 0; info("Exec'ing plug-in = %s\n", plugin); pid_t process = fork(); if (process) { /* Parent */ int rc = close(client_fd); if (-1 == rc) { err = errno; info("Error on closing accepted socket in parent: %s\n", strerror(err)); } } else { /* Child */ int exec_rc = 0; char fd_str[12]; char *plugin_argv[7]; extern char **environ; struct ucred cli_user_cred; socklen_t cli_user_cred_len = sizeof(cli_user_cred); /* * The plugin will still run no matter with root privilege or not. * so that client could get detailed error message. 
*/ if (require_root == 0) { drop_privileges(); } else { if (getuid()) { warn("Plugin %s require root privilege, but lsmd daemon " "is not run as root user\n", plugin); } else if (allow_root_plugin == 0) { warn("Plugin %s require root privilege, but %s disabled " "it globally\n", LSMD_CONF_FILE); drop_privileges(); } else { /* Check socket client uid */ int rc_get_cli_uid = getsockopt(client_fd, SOL_SOCKET, SO_PEERCRED, &cli_user_cred, &cli_user_cred_len); if (0 == rc_get_cli_uid) { if (cli_user_cred.uid != 0) { warn("Plugin %s require root privilege, but " "client is not run as root user\n", plugin); drop_privileges(); } else { info("Plugin %s is running as root privilege\n", plugin); } } else { warn("Failed to get client socket uid, getsockopt() " "error: %d\n", errno); drop_privileges(); } } } /* Make copy of plug-in string as once we call empty_plugin_list it * will be deleted :-) */ char *p_copy = strdup(plugin); empty_plugin_list(&head); sprintf(fd_str, "%d", client_fd); if (plugin_mem_debug) { char debug_out[64]; snprintf(debug_out, (sizeof(debug_out) - 1), "--log-file=/tmp/leaking_%d-%d", getppid(), getpid()); plugin_argv[0] = "valgrind"; plugin_argv[1] = "--leak-check=full"; plugin_argv[2] = "--show-reachable=no"; plugin_argv[3] = debug_out; plugin_argv[4] = p_copy; plugin_argv[5] = fd_str; plugin_argv[6] = NULL; exec_rc = execve("/usr/bin/valgrind", plugin_argv, environ); } else { plugin_argv[0] = basename(p_copy); plugin_argv[1] = fd_str; plugin_argv[2] = NULL; exec_rc = execve(p_copy, plugin_argv, environ); } if (-1 == exec_rc) { int err = errno; log_and_exit("Error on exec'ing Plugin %s: %s\n", p_copy, strerror(err)); } } } /** * Main event loop */ void _serving(void) { struct plugin *plug = NULL; struct timeval tmo; fd_set readfds; int nfds = 0; int err = 0; process_plugins(); while (serve_state == RUNNING) { FD_ZERO(&readfds); nfds = 0; tmo.tv_sec = 15; tmo.tv_usec = 0; LIST_FOREACH(plug, &head, pointers) { nfds = max(plug->fd, nfds); FD_SET(plug->fd, 
&readfds); } if (!nfds) { log_and_exit("No plugins found in directory %s\n", plugin_dir); } nfds += 1; int ready = select(nfds, &readfds, NULL, NULL, &tmo); if (-1 == ready) { if (serve_state != RUNNING) { return; } else { err = errno; log_and_exit("Error on selecting Plugin: %s", strerror(err)); } } else if (ready > 0) { int fd = 0; for (fd = 0; fd < nfds; fd++) { if (FD_ISSET(fd, &readfds)) { int cfd = accept(fd, NULL, NULL); if (-1 != cfd) { struct plugin *p = plugin_lookup(fd); exec_plugin(p->file_path, cfd, p->require_root); } else { err = errno; info("Error on accepting request: %s", strerror(err)); } } } } child_cleanup(); } clean_up(); } /** * Main entry for daemon to work */ void serve(void) { while (serve_state != EXIT) { if (serve_state == RESTART) { info("Reloading plug-ins\n"); serve_state = RUNNING; } _serving(); } clean_up(); } int main(int argc, char *argv[]) { int c = 0; LIST_INIT(&head); /* Process command line arguments */ while (1) { static struct option l_options[] = { {"help", no_argument, 0, 'h'}, //Index 0 {"plugindir", required_argument, 0, 0}, //Index 1 {"socketdir", required_argument, 0, 0}, //Index 2 {"confdir", required_argument, 0, 0}, //Index 3 {0, 0, 0, 0} }; int option_index = 0; c = getopt_long(argc, argv, "hvd", l_options, &option_index); if (c == -1) { break; } switch (c) { case 0: switch (option_index) { case 1: plugin_dir = optarg; break; case 2: socket_dir = optarg; break; case 3: conf_dir = optarg; break; } break; case 'h': usage(); break; case 'v': verbose_flag = 1; break; case 'd': systemd = 1; break; case '?': break; default: abort(); } } /* Print any remaining command line arguments (not options). 
*/ if (optind < argc) { printf("non-option ARGV-elements: "); while (optind < argc) { printf("%s \n", argv[optind++]); } printf("\n"); exit(1); } /* Setup syslog if needed */ if (!systemd) { openlog("lsmd", LOG_ODELAY, LOG_USER); } /* Check lsmd.conf */ char *lsmd_conf_path = path_form(conf_dir, LSMD_CONF_FILE); parse_conf_bool(lsmd_conf_path, LSM_CONF_ALLOW_ROOT_OPT_NAME, &allow_root_plugin); free(lsmd_conf_path); /* Check to see if we want to check plugin for memory errors */ if (getenv("LSM_VALGRIND")) { plugin_mem_debug = 1; } install_sh(); if (allow_root_plugin == 0) { drop_privileges(); } flight_check(); /* Become a daemon if told we are not using systemd */ if (!systemd) { if (-1 == daemon(0, 0)) { int err = errno; log_and_exit("Error on calling daemon: %s\n", strerror(err)); } } serve(); return EXIT_SUCCESS; } libstoragemgmt-1.2.3/daemon/Makefile.am0000664000175000017500000000064212537546122014770 00000000000000bin_PROGRAMS = lsmd EXTRA_DIST=lsm_rest.c lsm_rest.h lsmd_LDFLAGS=-Wl,-z,relro,-z,now -pie $(LIBCONFIG_LIBS) lsmd_CFLAGS=-fPIE -DPIE $(LIBCONFIG_CFLAGS) lsmd_SOURCES = lsm_daemon.c if WITH_REST_API bin_PROGRAMS += lsm_restd lsm_restd_LDFLAGS=$(LIBMICROHTTPD_LIBS) $(JSON_LIBS) $(LIBXML_LIBS) lsm_restd_CFLAGS=-fPIE -DPIE $(LIBMICROHTTPD_CFLAGS) $(JSON_CFLAGS) $(LIBXML_CFLAGS) lsm_restd_SOURCES= lsm_rest.c endif libstoragemgmt-1.2.3/daemon/Makefile.in0000664000175000017500000006201412542455445015005 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : 
PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ bin_PROGRAMS = lsmd$(EXEEXT) $(am__EXEEXT_1) @WITH_REST_API_TRUE@am__append_1 = lsm_restd subdir = daemon DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(top_srcdir)/build-aux/depcomp ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = @WITH_REST_API_TRUE@am__EXEEXT_1 = lsm_restd$(EXEEXT) am__installdirs = "$(DESTDIR)$(bindir)" PROGRAMS = $(bin_PROGRAMS) am__lsm_restd_SOURCES_DIST = lsm_rest.c @WITH_REST_API_TRUE@am_lsm_restd_OBJECTS = \ @WITH_REST_API_TRUE@ lsm_restd-lsm_rest.$(OBJEXT) lsm_restd_OBJECTS = $(am_lsm_restd_OBJECTS) lsm_restd_LDADD = $(LDADD) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = lsm_restd_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(lsm_restd_CFLAGS) \ $(CFLAGS) $(lsm_restd_LDFLAGS) $(LDFLAGS) -o $@ am_lsmd_OBJECTS = lsmd-lsm_daemon.$(OBJEXT) lsmd_OBJECTS = $(am_lsmd_OBJECTS) lsmd_LDADD = $(LDADD) lsmd_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(lsmd_CFLAGS) $(CFLAGS) \ $(lsmd_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) 
depcomp = $(SHELL) $(top_srcdir)/build-aux/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(lsm_restd_SOURCES) $(lsmd_SOURCES) DIST_SOURCES = $(am__lsm_restd_SOURCES_DIST) $(lsmd_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = 
@OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = 
@pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ EXTRA_DIST = lsm_rest.c lsm_rest.h lsmd_LDFLAGS = -Wl,-z,relro,-z,now -pie $(LIBCONFIG_LIBS) lsmd_CFLAGS = -fPIE -DPIE $(LIBCONFIG_CFLAGS) lsmd_SOURCES = lsm_daemon.c @WITH_REST_API_TRUE@lsm_restd_LDFLAGS = $(LIBMICROHTTPD_LIBS) $(JSON_LIBS) $(LIBXML_LIBS) @WITH_REST_API_TRUE@lsm_restd_CFLAGS = -fPIE -DPIE $(LIBMICROHTTPD_CFLAGS) $(JSON_CFLAGS) $(LIBXML_CFLAGS) @WITH_REST_API_TRUE@lsm_restd_SOURCES = lsm_rest.c all: all-am .SUFFIXES: .SUFFIXES: .c .lo .o .obj $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu daemon/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu daemon/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p \ || test -f $$p1 \ ; then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' \ -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n 
"$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' \ `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list lsm_restd$(EXEEXT): $(lsm_restd_OBJECTS) $(lsm_restd_DEPENDENCIES) $(EXTRA_lsm_restd_DEPENDENCIES) @rm -f lsm_restd$(EXEEXT) $(AM_V_CCLD)$(lsm_restd_LINK) $(lsm_restd_OBJECTS) $(lsm_restd_LDADD) $(LIBS) lsmd$(EXEEXT): $(lsmd_OBJECTS) $(lsmd_DEPENDENCIES) $(EXTRA_lsmd_DEPENDENCIES) @rm -f lsmd$(EXEEXT) $(AM_V_CCLD)$(lsmd_LINK) $(lsmd_OBJECTS) $(lsmd_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsm_restd-lsm_rest.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsmd-lsm_daemon.Po@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< .c.obj: @am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ 
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ @am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< lsm_restd-lsm_rest.o: lsm_rest.c @am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lsm_restd_CFLAGS) $(CFLAGS) -MT lsm_restd-lsm_rest.o -MD -MP -MF $(DEPDIR)/lsm_restd-lsm_rest.Tpo -c -o lsm_restd-lsm_rest.o `test -f 'lsm_rest.c' || echo '$(srcdir)/'`lsm_rest.c @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/lsm_restd-lsm_rest.Tpo $(DEPDIR)/lsm_restd-lsm_rest.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='lsm_rest.c' object='lsm_restd-lsm_rest.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lsm_restd_CFLAGS) $(CFLAGS) -c -o lsm_restd-lsm_rest.o `test -f 'lsm_rest.c' || echo '$(srcdir)/'`lsm_rest.c lsm_restd-lsm_rest.obj: lsm_rest.c @am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lsm_restd_CFLAGS) $(CFLAGS) -MT lsm_restd-lsm_rest.obj -MD -MP -MF $(DEPDIR)/lsm_restd-lsm_rest.Tpo -c -o lsm_restd-lsm_rest.obj `if test -f 'lsm_rest.c'; then $(CYGPATH_W) 'lsm_rest.c'; else $(CYGPATH_W) '$(srcdir)/lsm_rest.c'; fi` @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/lsm_restd-lsm_rest.Tpo 
$(DEPDIR)/lsm_restd-lsm_rest.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='lsm_rest.c' object='lsm_restd-lsm_rest.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lsm_restd_CFLAGS) $(CFLAGS) -c -o lsm_restd-lsm_rest.obj `if test -f 'lsm_rest.c'; then $(CYGPATH_W) 'lsm_rest.c'; else $(CYGPATH_W) '$(srcdir)/lsm_rest.c'; fi` lsmd-lsm_daemon.o: lsm_daemon.c @am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lsmd_CFLAGS) $(CFLAGS) -MT lsmd-lsm_daemon.o -MD -MP -MF $(DEPDIR)/lsmd-lsm_daemon.Tpo -c -o lsmd-lsm_daemon.o `test -f 'lsm_daemon.c' || echo '$(srcdir)/'`lsm_daemon.c @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/lsmd-lsm_daemon.Tpo $(DEPDIR)/lsmd-lsm_daemon.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='lsm_daemon.c' object='lsmd-lsm_daemon.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lsmd_CFLAGS) $(CFLAGS) -c -o lsmd-lsm_daemon.o `test -f 'lsm_daemon.c' || echo '$(srcdir)/'`lsm_daemon.c lsmd-lsm_daemon.obj: lsm_daemon.c @am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lsmd_CFLAGS) $(CFLAGS) -MT lsmd-lsm_daemon.obj -MD -MP -MF $(DEPDIR)/lsmd-lsm_daemon.Tpo -c -o lsmd-lsm_daemon.obj `if test -f 'lsm_daemon.c'; then $(CYGPATH_W) 'lsm_daemon.c'; else $(CYGPATH_W) '$(srcdir)/lsm_daemon.c'; fi` @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/lsmd-lsm_daemon.Tpo $(DEPDIR)/lsmd-lsm_daemon.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='lsm_daemon.c' object='lsmd-lsm_daemon.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ 
DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lsmd_CFLAGS) $(CFLAGS) -c -o lsmd-lsm_daemon.obj `if test -f 'lsm_daemon.c'; then $(CYGPATH_W) 'lsm_daemon.c'; else $(CYGPATH_W) '$(srcdir)/lsm_daemon.c'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo 
"$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) installdirs: for dir in "$(DESTDIR)$(bindir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \ clean-binPROGRAMS clean-generic clean-libtool cscopelist-am \ ctags ctags-am distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-binPROGRAMS \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am uninstall-binPROGRAMS # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: libstoragemgmt-1.2.3/daemon/lsm_rest.h0000664000175000017500000000374612537737032014747 00000000000000/* * Copyright (C) 2011-2013 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: Gris Ge */ #ifndef LIBSTORAGEMGMT_REST_H #define LIBSTORAGEMGMT_REST_H #ifdef __cplusplus extern "C" { #endif #define LSM_REST_PORT 8888 #define LSM_REST_TMO 60000 #define LSM_SOCK_BUFF_LEN 4096 #define LSM_DEFAULT_ID 100 #define LSM_JSON_MIME "application/json" #define LSM_HEADER_LEN 10 #define LSM_API_VER_LEN 4 #define LSM_UDS_PATH_DEFAULT "/var/run/lsm/ipc" enum lsm_json_type { lsm_json_type_null, lsm_json_type_int, lsm_json_type_float, lsm_json_type_string, lsm_json_type_bool, lsm_json_type_array_str, }; static const char *lsm_query_strs[] = { "systems", "volumes", "pools", "disks", "fs", "access_groups", "initiators", }; typedef struct Parameter { const char *key_name; const void *value; enum lsm_json_type value_type; ssize_t array_len; // only useful for ARRAY_STR type. 
struct Parameter *next; } Parameter_t; typedef struct ParaList { Parameter_t *head; } ParaList_t; void para_list_init(ParaList_t *); int para_list_add(ParaList_t *, const char *, const void *, const enum lsm_json_type, const ssize_t); void para_list_free(ParaList_t *); json_object *para_to_json(const enum lsm_json_type, const void *, const ssize_t); json_object *para_list_to_json(ParaList_t *); #ifdef __cplusplus } #endif #endif /* LIBSTORAGEMGMT_REST_H */ libstoragemgmt-1.2.3/daemon/lsm_rest.c0000664000175000017500000003775412537737032014750 00000000000000/* * Copyright (C) 2011-2013 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . 
* * Author: Gris Ge */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #include #include #include #include "lsm_rest.h" /* TODO: MHD_get_connection_values() with MHD_GET_ARGUMENT_KIND to get all query argument TODO: Check malloc() return code */ void para_list_init(ParaList_t * para_list) { para_list->head = NULL; } int para_list_add(ParaList_t * para_list, const char *key_name, const void *value, const enum lsm_json_type value_type, const ssize_t array_len) { if (para_list == NULL) return -1; Parameter_t *new_para_node = (Parameter_t *) malloc(sizeof(Parameter_t)); new_para_node->key_name = key_name; new_para_node->value = value; new_para_node->value_type = value_type; new_para_node->array_len = array_len; new_para_node->next = NULL; if (para_list->head == NULL) { para_list->head = new_para_node; } else { Parameter_t *current = para_list->head; while (current->next != NULL) { current = current->next; } current->next = new_para_node; } return 0; } void para_list_free(ParaList_t * para_list) { if (para_list == NULL) return; if (para_list->head == NULL) { free(para_list); } else { Parameter_t *current = para_list->head; Parameter_t *next = current->next; free(current); while (next != NULL) { current = next; next = current->next; free(current); } free(para_list); } return; } json_object *para_to_json(const enum lsm_json_type value_type, const void *para_value, ssize_t array_len) { json_object *para_val_obj = NULL; switch (value_type) { case lsm_json_type_null: break; case lsm_json_type_int: para_val_obj = json_object_new_int64(*(int64_t *) para_value); break; case lsm_json_type_float: para_val_obj = json_object_new_double(*(double *) para_value); break; case lsm_json_type_string: para_val_obj = json_object_new_string((const char *) para_value); break; case lsm_json_type_bool: para_val_obj = json_object_new_boolean(*(json_bool *) para_value); break; case lsm_json_type_array_str: para_val_obj = 
json_object_new_array(); ssize_t i; for (i = 0; i < array_len; i++) { json_object *array_member = para_to_json(lsm_json_type_string, (void *) ((char **) para_value)[i], 0); json_object_array_add(para_val_obj, array_member); } break; default: break; } return para_val_obj; } json_object *para_list_to_json(ParaList_t * para_list) { Parameter_t *cur_node = para_list->head; if (cur_node == NULL) { return NULL; } json_object *jobj = json_object_new_object(); while (cur_node != NULL) { json_object_object_add(jobj, cur_node->key_name, para_to_json(cur_node->value_type, cur_node->value, cur_node->array_len)); cur_node = cur_node->next; } return jobj; } static int connect_socket(const char *uri_str, const char *plugin_dir, int *error_no) { int socket_fd = -1; xmlURIPtr uri_obj; uri_obj = xmlParseURI(uri_str); char *uri_scheme = NULL; if (uri_obj != NULL) { uri_scheme = strdup(uri_obj->scheme); xmlFreeURI(uri_obj); uri_obj = NULL; } else { *error_no = errno; return socket_fd; } char *plugin_file = NULL; if (asprintf(&plugin_file, "%s/%s", plugin_dir, uri_scheme) == -1) { free(uri_scheme); *error_no = ENOMEM; return socket_fd; } free(uri_scheme); socket_fd = socket(AF_UNIX, SOCK_STREAM, 0); if (socket_fd != -1) { struct sockaddr_un addr; memset(&addr, 0, sizeof(addr)); addr.sun_family = AF_UNIX; if (strlen(plugin_file) > (sizeof(addr.sun_path) - 1)) { socket_fd = -1; fprintf(stderr, "Plugin file path too long: %s, " "max is %zu", plugin_file, sizeof(addr.sun_path) - 1); } strcpy(addr.sun_path, plugin_file); free(plugin_file); if (connect(socket_fd, (struct sockaddr *) &addr, sizeof(addr)) != 0) { *error_no = errno; socket_fd = -1; } } else { *error_no = errno; } return socket_fd; } static int send_msg(int socket_fd, const char *msg, int *error_no) { int rc = -1; size_t len = strlen(msg); size_t new_msg_len = strlen(msg) + LSM_HEADER_LEN + 1; char *msg_with_header = (char *) malloc(new_msg_len); sprintf(msg_with_header, "%0*zu%s", LSM_HEADER_LEN, len, msg); ssize_t written = 0; 
new_msg_len -= 1; while (written < new_msg_len) { ssize_t wrote = send(socket_fd, msg_with_header + written, (new_msg_len - written), MSG_NOSIGNAL); if (wrote != -1) { written += wrote; } else { *error_no = errno; break; } } if ((written == new_msg_len) && *error_no == 0) { rc = 0; } free(msg_with_header); return rc; } static char *_recv_msg(int socket_fd, size_t count, int *error_no) { char buff[LSM_SOCK_BUFF_LEN]; size_t amount_read = 0; *error_no = 0; char *msg = malloc(count + 1); memset(msg, 0, count + 1); while (amount_read < count) { ssize_t rd = (ssize_t) recv(socket_fd, buff, MIN(sizeof(buff), count - amount_read), MSG_WAITALL); if (rd > 0) { memcpy(msg + amount_read, buff, rd); amount_read += rd; } else if (errno == EAGAIN) { printf("retry\n"); errno = 0; continue; // TODO: don't know why recv() don't block. } else { *error_no = errno; break; } } if (*error_no == 0) { msg[count] = '\0'; return msg; } else { fprintf(stderr, "recv() got error_no, : %d\n", *error_no); free(msg); return NULL; } } static char *recv_msg(int socket_fd, int *error_no) { *error_no = 0; char *msg_len_str = _recv_msg(socket_fd, LSM_HEADER_LEN, error_no); if (msg_len_str == NULL) { fprintf(stderr, "Failed to read the JSON length " "with error_no%d\n", *error_no); return NULL; } errno = 0; size_t msg_len = (size_t) strtoul(msg_len_str, NULL, 10); free(msg_len_str); if ((errno == ERANGE && (msg_len == LONG_MAX || msg_len == LONG_MIN)) || (errno != 0 && msg_len == 0)) { perror("strtol"); return NULL; } if (msg_len == 0) { fprintf(stderr, "No data needed to retrieve\n"); return NULL; } char *msg = _recv_msg(socket_fd, msg_len, error_no); if (msg == NULL) { fprintf(stderr, "Failed to retrieve data from socket " "with error_no %d\n", *error_no); return NULL; } return msg; } static char *rpc(int socket_fd, const char *method, ParaList_t * para_list, int *error_no) { *error_no = 0; json_object *jobj = json_object_new_object(); json_object_object_add(jobj, "method", 
json_object_new_string(method)); json_object *js_params = para_list_to_json(para_list); if (js_params != NULL) { json_object_object_add(jobj, "params", js_params); } json_object_object_add(jobj, "id", json_object_new_int(LSM_DEFAULT_ID)); const char *json_string = json_object_to_json_string_ext(jobj, JSON_C_TO_STRING_PRETTY); printf("Sending JSON to plugin:\n%s\n", json_string); // code_debug *error_no = 0; int rc = send_msg(socket_fd, json_string, error_no); json_object_put(jobj); if (rc != 0) { fprintf(stderr, "Got error when sending message to socket, " "rc=%d, error_no=%d\n", rc, *error_no); return NULL; } char *recv_json_string = NULL; recv_json_string = recv_msg(socket_fd, error_no); if (*error_no != 0) { printf("Got error when receiving message to socket," "error_no=%d\n", *error_no); free(recv_json_string); return NULL; } if (recv_json_string == NULL) { printf("No data retrieved\n"); return NULL; } json_object *recv_json = json_tokener_parse(recv_json_string); free(recv_json_string); json_object *result_json; if (!json_object_object_get_ex(recv_json, "result", &result_json)) { printf("No 'result' node in received JSON data"); json_object_put(recv_json); return NULL; } char *result_str; result_str = (char *) json_object_to_json_string_ext(result_json, JSON_C_TO_STRING_PRETTY); char *rc_msg = strdup(result_str); json_object_put(recv_json); return rc_msg; } static int plugin_startup(int socket_fd, const char *uri, const char *pass, int tmo) { printf("Starting the plugin\n"); int error_no = 0; enum lsm_json_type pass_type = lsm_json_type_string; if (pass == NULL) { pass_type = lsm_json_type_null; } ParaList_t *para_list = (ParaList_t *) malloc(sizeof(ParaList_t)); para_list_init(para_list); para_list_add(para_list, "uri", uri, lsm_json_type_string, 0); para_list_add(para_list, "password", pass, pass_type, 0); para_list_add(para_list, "timeout", &tmo, lsm_json_type_int, 0); char *msg = rpc(socket_fd, "plugin_register", para_list, &error_no); free(msg); 
para_list_free(para_list); return error_no; } static int plugin_shutdown(int socket_fd) { printf("Shutting down the plugin\n"); int error_no = 0; ParaList_t *para_list = (ParaList_t *) malloc(sizeof(ParaList_t)); para_list_init(para_list); static int lsm_flags = 0; para_list_add(para_list, "flags", &lsm_flags, lsm_json_type_int, 0); char *msg = rpc(socket_fd, "plugin_unregister", para_list, &error_no); free(msg); para_list_free(para_list); return error_no; } static char *v01_query(int socket_fd, const char *method, ParaList_t * para_list, int *error_no) { *error_no = 0; if (para_list == NULL) { para_list = (ParaList_t *) malloc(sizeof(ParaList_t)); para_list_init(para_list); } int lsm_flags = 0; para_list_add(para_list, "flags", &lsm_flags, lsm_json_type_int, 0); char *json_str = rpc(socket_fd, method, para_list, error_no); para_list_free(para_list); return json_str; } static char *lsm_api_0_1(struct MHD_Connection *connection, const char *uri, const char *pass, const char *url, const char *method, const char *upload_data) { const char *plugin_dir = getenv("LSM_UDS_PATH"); if (plugin_dir == NULL) { plugin_dir = LSM_UDS_PATH_DEFAULT; fprintf(stdout, "Using default LSM_UDS_PATH: %s\n", plugin_dir); } int error_no = 0; int socket_fd = connect_socket(uri, plugin_dir, &error_no); if (socket_fd == -1) { fprintf(stderr, "Failed to connecting to the socket for URI " "%s with error_no %d\n", uri, error_no); return NULL; } error_no = plugin_startup(socket_fd, uri, pass, LSM_REST_TMO); if (error_no != 0) { fprintf(stderr, "Failed to register plugin, " "error_no %d", error_no); plugin_shutdown(socket_fd); shutdown(socket_fd, 0); return NULL; } error_no = 0; char *json_msg = NULL; int i; int flag_found = 0; for (i = 0; i < sizeof(lsm_query_strs) / sizeof(char *); i++) { if (0 == strcmp(url, lsm_query_strs[i])) { flag_found = 1; json_msg = v01_query(socket_fd, lsm_query_strs[i], NULL, &error_no); break; } } if (flag_found == 0) { fprintf(stderr, "Not supported: %s\n", url); } if 
(error_no != 0) { fprintf(stderr, "Failed to call method %s(), error_no: %d\n", url, error_no); } error_no = plugin_shutdown(socket_fd); if (error_no != 0) { fprintf(stderr, "Failed to unregister plugin, " "error_no %d", error_no); } shutdown(socket_fd, 0); return json_msg; } static int answer_to_connection(void *cls, struct MHD_Connection *connection, const char *url, const char *method, const char *version, const char *upload_data, size_t * upload_data_size, void **con_cls) { printf("New '%s' request, URL: '%s'\n", method, url); // code_debug struct MHD_Response *response; if (0 != strcmp(method, "GET")) { return MHD_NO; } if (strlen(url) == 1) { return MHD_NO; } const char *uri = MHD_lookup_connection_value(connection, MHD_GET_ARGUMENT_KIND, "uri"); const char *pass = MHD_lookup_connection_value(connection, MHD_GET_ARGUMENT_KIND, "pass"); int ret; char api_version[LSM_API_VER_LEN + 1]; memcpy(api_version, url + 1, LSM_API_VER_LEN); // url + 1 is used to get rid of leading '/' api_version[LSM_API_VER_LEN] = '\0'; char *json_str = NULL; size_t url_no_api_ver_len = strlen(url) - strlen(api_version) - 1 - 1; // -1 -1 means remove two leading / // example: /v0.1/systems --change to--> systems char *url_no_api_ver = malloc(url_no_api_ver_len + 1); strcpy(url_no_api_ver, url + strlen(api_version) + 1 + 1); if (0 == strcmp(api_version, "v0.1")) { printf("v0.1 API request found\n"); // code_debug json_str = lsm_api_0_1(connection, uri, pass, url_no_api_ver, method, upload_data); free(url_no_api_ver); if (json_str == NULL) { return MHD_NO; } } else { free(url_no_api_ver); return MHD_NO; } response = MHD_create_response_from_buffer(strlen(json_str), (void *) json_str, MHD_RESPMEM_MUST_FREE); MHD_add_response_header(response, "Content-Type", LSM_JSON_MIME); ret = MHD_queue_response(connection, MHD_HTTP_OK, response); MHD_destroy_response(response); return ret; } int main(int argc, char **argv) { struct MHD_Daemon *daemon; daemon = MHD_start_daemon(MHD_USE_SELECT_INTERNALLY, 
LSM_REST_PORT, NULL, NULL, &answer_to_connection, NULL, MHD_OPTION_END); while (1) { sleep(60); } MHD_stop_daemon(daemon); return EXIT_SUCCESS; } libstoragemgmt-1.2.3/README0000664000175000017500000000021012537546122012340 00000000000000 libStorageMgmt : A library for storage management Full documentation can be found at: http://libstorage.github.io/libstoragemgmt-doc/ libstoragemgmt-1.2.3/ChangeLog0000664000175000017500000000000012542252305013220 00000000000000libstoragemgmt-1.2.3/libstoragemgmt.pc.in0000664000175000017500000000040112537546122015433 00000000000000prefix=@prefix@ exec_prefix=@exec_prefix@ libdir=@libdir@ includedir=@includedir@/libstoragemgmt Name: libstoragemgmt Version: @VERSION@ Description: Storage array management library Requires: Libs: -L${libdir} -lstoragemgmt @LIBS@ Cflags: -I${includedir} libstoragemgmt-1.2.3/c_binding/0000775000175000017500000000000012542455463013466 500000000000000libstoragemgmt-1.2.3/c_binding/util/0000775000175000017500000000000012542455463014443 500000000000000libstoragemgmt-1.2.3/c_binding/util/qparams.c0000664000175000017500000001276312537737032016203 00000000000000/* Copyright (C) 2007, 2009-2011 Red Hat, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Authors: * Richard W.M. Jones * * Utility functions to help parse and assemble query strings. * * * * !!!NOTE!!!: Taken from libvirt and modified to remove libvirt coupling. 
*/ #include #include #include #include #include "qparams.h" #include struct qparam_set * new_qparam_set (int init_alloc, ...) { va_list args; struct qparam_set *ps; const char *pname, *pvalue; if (init_alloc <= 0) init_alloc = 1; ps = (struct qparam_set *)calloc( 1, sizeof(*(ps))); if( !ps ) { return NULL; } ps->n = 0; ps->alloc = init_alloc; ps->p = (struct qparam *) calloc( ps->alloc, sizeof(*(ps->p))); if (!ps->p) { free(ps); return NULL; } va_start (args, init_alloc); while ((pname = va_arg (args, char *)) != NULL) { pvalue = va_arg (args, char *); if (append_qparam (ps, pname, pvalue) == -1) { free_qparam_set (ps); ps = NULL; break; } } va_end (args); return ps; } int append_qparams (struct qparam_set *ps, ...) { va_list args; const char *pname, *pvalue; int ret = 0; va_start (args, ps); while ((pname = va_arg (args, char *)) != NULL) { pvalue = va_arg (args, char *); if (append_qparam (ps, pname, pvalue) == -1) { ret = -1; break; } } va_end (args); return ret; } /* Ensure there is space to store at least one more parameter * at the end of the set. 
*/ static int grow_qparam_set (struct qparam_set *ps) { if (ps->n >= ps->alloc) { void *tmp = realloc( ps->p, ps->alloc * 2); if( !tmp ) { return -1; } ps->p = (struct qparam *)tmp; ps->alloc *= 2; } return 0; } int append_qparam (struct qparam_set *ps, const char *name, const char *value) { char *pname, *pvalue; pname = strdup (name); if (!pname) { return -1; } pvalue = strdup (value); if (!pvalue) { free(pname); return -1; } if (grow_qparam_set (ps) == -1) { free(pname); free(pvalue); return -1; } ps->p[ps->n].name = pname; ps->p[ps->n].value = pvalue; ps->p[ps->n].ignore = 0; ps->n++; return 0; } void free_qparam_set (struct qparam_set *ps) { int i; for (i = 0; i < ps->n; ++i) { free(ps->p[i].name); free(ps->p[i].value); } free(ps->p); ps->p = NULL; free(ps); } struct qparam_set * qparam_query_parse (const char *query) { struct qparam_set *ps; const char *end, *eq; ps = new_qparam_set (0, NULL); if (!ps) { return NULL; } if (!query || query[0] == '\0') return ps; while (*query) { char *name = NULL, *value = NULL; /* Find the next separator, or end of the string. */ end = strchr (query, '&'); if (!end) end = strchr (query, ';'); if (!end) end = query + strlen (query); /* Find the first '=' character between here and end. */ eq = strchr (query, '='); if (eq && eq >= end) eq = NULL; /* Empty section (eg. "&&"). */ if (end == query) goto next; /* If there is no '=' character, then we have just "name" * and consistent with CGI.pm we assume value is "". */ else if (!eq) { name = xmlURIUnescapeString (query, end - query, NULL); if (!name) goto out_of_memory; } /* Or if we have "name=" here (works around annoying * problem when calling xmlURIUnescapeString with len = 0). */ else if (eq+1 == end) { name = xmlURIUnescapeString (query, eq - query, NULL); if (!name) goto out_of_memory; } /* If the '=' character is at the beginning then we have * "=value" and consistent with CGI.pm we _ignore_ this. */ else if (query == eq) goto next; /* Otherwise it's "name=value". 
*/ else { name = xmlURIUnescapeString (query, eq - query, NULL); if (!name) goto out_of_memory; value = xmlURIUnescapeString (eq+1, end - (eq+1), NULL); if (!value) { free(name); goto out_of_memory; } } /* Append to the parameter set. */ if (append_qparam (ps, name, value ? value : "") == -1) { free(name); free(value); goto out_of_memory; } free(name); free(value); next: query = end; if (*query) query ++; /* skip '&' separator */ } return ps; out_of_memory: free_qparam_set (ps); return NULL; } libstoragemgmt-1.2.3/c_binding/util/qparams.h0000664000175000017500000000453612537737032016207 00000000000000/* * Copyright (C) 2010-2011 Red Hat, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Authors: * Richard W.M. Jones * * Utility functions to help parse and assemble query strings. * * * !!!NOTE!!!: Taken from libvirt and modified to remove libvirt coupling. */ #ifndef _QPARAMS_H_ # define _QPARAMS_H_ #ifdef __cplusplus extern "C" { #endif #include "libstoragemgmt/libstoragemgmt_common.h" /** * * ATTRIBUTE_SENTINEL: * * * * Macro to check for NULL-terminated varargs lists * */ # ifndef ATTRIBUTE_SENTINEL # if __GNUC_PREREQ (4, 0) # define ATTRIBUTE_SENTINEL __attribute__((__sentinel__)) # else # define ATTRIBUTE_SENTINEL # endif # endif /** * Single web service query parameter 'name=value'. */ struct qparam { char *name; /**< Name (unescaped). */ char *value; /**< Value (unescaped). 
*/ int ignore; /**< Ignore this field in qparam_get_query */ }; /** * Set of parameters. */ struct qparam_set { int n; /**< number of parameters used */ int alloc; /**< allocated space */ struct qparam *p; /**< array of parameters */ }; /* New parameter set. */ LSM_DLL_LOCAL struct qparam_set *new_qparam_set (int init_alloc, ...) ATTRIBUTE_SENTINEL; /* Appending parameters. */ LSM_DLL_LOCAL int append_qparams (struct qparam_set *ps, ...) ATTRIBUTE_SENTINEL; LSM_DLL_LOCAL int append_qparam (struct qparam_set *ps, const char *name, const char *value); /* Parse a query string into a parameter set. */ LSM_DLL_LOCAL struct qparam_set *qparam_query_parse (const char *query); LSM_DLL_LOCAL void free_qparam_set (struct qparam_set *ps); #ifdef __cplusplus } #endif #endif /* _QPARAMS_H_ */ libstoragemgmt-1.2.3/c_binding/lsm_convert.cpp0000664000175000017500000005537412537737032016462 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . 
* * Author: tasleson */ #include "lsm_convert.hpp" #include "libstoragemgmt/libstoragemgmt_accessgroups.h" #include "libstoragemgmt/libstoragemgmt_blockrange.h" #include "libstoragemgmt/libstoragemgmt_nfsexport.h" bool is_expected_object(Value & obj, std::string class_name) { if (obj.valueType() == Value::object_t) { std::map < std::string, Value > i = obj.asObject(); std::map < std::string, Value >::iterator iter = i.find("class"); if (iter != i.end() && iter->second.asString() == class_name) { return true; } } return false; } lsm_volume *value_to_volume(Value & vol) { lsm_volume *rc = NULL; if (is_expected_object(vol, CLASS_NAME_VOLUME)) { std::map < std::string, Value > v = vol.asObject(); rc = lsm_volume_record_alloc(v["id"].asString().c_str(), v["name"].asString().c_str(), v["vpd83"].asString().c_str(), v["block_size"].asUint64_t(), v["num_of_blocks"].asUint64_t(), v["admin_state"].asUint32_t(), v["system_id"].asString().c_str(), v["pool_id"].asString().c_str(), v["plugin_data"].asC_str()); } else { throw ValueException("value_to_volume: Not correct type"); } return rc; } Value volume_to_value(lsm_volume * vol) { if (LSM_IS_VOL(vol)) { std::map < std::string, Value > v; v["class"] = Value(CLASS_NAME_VOLUME); v["id"] = Value(vol->id); v["name"] = Value(vol->name); v["vpd83"] = Value(vol->vpd83); v["block_size"] = Value(vol->block_size); v["num_of_blocks"] = Value(vol->number_of_blocks); v["admin_state"] = Value(vol->admin_state); v["system_id"] = Value(vol->system_id); v["pool_id"] = Value(vol->pool_id); v["plugin_data"] = Value(vol->plugin_data); return Value(v); } return Value(); } int value_array_to_volumes(Value & volume_values, lsm_volume ** volumes[], uint32_t * count) { int rc = LSM_ERR_OK; try { *count = 0; if (Value::array_t == volume_values.valueType()) { std::vector < Value > vol = volume_values.asArray(); *count = vol.size(); if (vol.size()) { *volumes = lsm_volume_record_array_alloc(vol.size()); if (*volumes) { for (size_t i = 0; i < vol.size(); 
++i) { (*volumes)[i] = value_to_volume(vol[i]); if (!((*volumes)[i])) { rc = LSM_ERR_NO_MEMORY; goto error; } } } else { rc = LSM_ERR_NO_MEMORY; } } } } catch(const ValueException & ve) { rc = LSM_ERR_LIB_BUG; goto error; } out: return rc; error: if (*volumes && *count) { lsm_volume_record_array_free(*volumes, *count); *volumes = NULL; *count = 0; } goto out; } lsm_disk *value_to_disk(Value & disk) { lsm_disk *rc = NULL; if (is_expected_object(disk, CLASS_NAME_DISK)) { std::map < std::string, Value > d = disk.asObject(); rc = lsm_disk_record_alloc(d["id"].asString().c_str(), d["name"].asString().c_str(), (lsm_disk_type) d["disk_type"].asInt32_t(), d["block_size"].asUint64_t(), d["num_of_blocks"].asUint64_t(), d["status"].asUint64_t(), d["system_id"].asString().c_str() ); } else { throw ValueException("value_to_disk: Not correct type"); } return rc; } Value disk_to_value(lsm_disk * disk) { if (LSM_IS_DISK(disk)) { std::map < std::string, Value > d; d["class"] = Value(CLASS_NAME_DISK); d["id"] = Value(disk->id); d["name"] = Value(disk->name); d["disk_type"] = Value(disk->disk_type); d["block_size"] = Value(disk->block_size); d["num_of_blocks"] = Value(disk->block_count); d["status"] = Value(disk->disk_status); d["system_id"] = Value(disk->system_id); return Value(d); } return Value(); } int value_array_to_disks(Value & disk_values, lsm_disk ** disks[], uint32_t * count) { int rc = LSM_ERR_OK; try { *count = 0; if (Value::array_t == disk_values.valueType()) { std::vector < Value > d = disk_values.asArray(); *count = d.size(); if (d.size()) { *disks = lsm_disk_record_array_alloc(d.size()); if (*disks) { for (size_t i = 0; i < d.size(); ++i) { (*disks)[i] = value_to_disk(d[i]); if (!((*disks)[i])) { rc = LSM_ERR_NO_MEMORY; goto error; } } } else { rc = LSM_ERR_NO_MEMORY; } } } } catch(const ValueException & ve) { rc = LSM_ERR_LIB_BUG; goto error; } out: return rc; error: if (*disks && *count) { lsm_disk_record_array_free(*disks, *count); *disks = NULL; *count = 0; } 
goto out; } lsm_pool *value_to_pool(Value & pool) { lsm_pool *rc = NULL; if (is_expected_object(pool, CLASS_NAME_POOL)) { std::map < std::string, Value > i = pool.asObject(); rc = lsm_pool_record_alloc(i["id"].asString().c_str(), i["name"].asString().c_str(), i["element_type"].asUint64_t(), i["unsupported_actions"].asUint64_t(), i["total_space"].asUint64_t(), i["free_space"].asUint64_t(), i["status"].asUint64_t(), i["status_info"].asString().c_str(), i["system_id"].asString().c_str(), i["plugin_data"].asC_str()); } else { throw ValueException("value_to_pool: Not correct type"); } return rc; } Value pool_to_value(lsm_pool * pool) { if (LSM_IS_POOL(pool)) { std::map < std::string, Value > p; p["class"] = Value(CLASS_NAME_POOL); p["id"] = Value(pool->id); p["name"] = Value(pool->name); p["element_type"] = Value(pool->element_type); p["unsupported_actions"] = Value(pool->unsupported_actions); p["total_space"] = Value(pool->total_space); p["free_space"] = Value(pool->free_space); p["status"] = Value(pool->status); p["status_info"] = Value(pool->status_info); p["system_id"] = Value(pool->system_id); p["plugin_data"] = Value(pool->plugin_data); return Value(p); } return Value(); } lsm_system *value_to_system(Value & system) { lsm_system *rc = NULL; if (is_expected_object(system, CLASS_NAME_SYSTEM)) { std::map < std::string, Value > i = system.asObject(); rc = lsm_system_record_alloc(i["id"].asString().c_str(), i["name"].asString().c_str(), i["status"].asUint32_t(), i["status_info"].asString().c_str(), i["plugin_data"].asC_str()); } else { throw ValueException("value_to_system: Not correct type"); } return rc; } Value system_to_value(lsm_system * system) { if (LSM_IS_SYSTEM(system)) { std::map < std::string, Value > s; s["class"] = Value(CLASS_NAME_SYSTEM); s["id"] = Value(system->id); s["name"] = Value(system->name); s["status"] = Value(system->status); s["status_info"] = Value(system->status_info); s["plugin_data"] = Value(system->plugin_data); return Value(s); } return 
Value();
}

/*
 * Convert a JSON array Value into a newly allocated lsm_string_list.
 * Returns NULL on allocation or element-set failure; throws
 * ValueException when 'v' is not an array.
 */
lsm_string_list *value_to_string_list(Value & v)
{
    if (Value::array_t != v.valueType())
        throw ValueException("value_to_string_list: Not correct type");

    std::vector < Value > items = v.asArray();
    uint32_t n = items.size();

    lsm_string_list *out = lsm_string_list_alloc(n);
    if (!out)
        return NULL;

    for (uint32_t i = 0; i < n; ++i) {
        if (LSM_ERR_OK !=
            lsm_string_list_elem_set(out, i, items[i].asC_str())) {
            lsm_string_list_free(out);
            return NULL;
        }
    }
    return out;
}

/*
 * Convert an lsm_string_list into a JSON array Value.  Produces an
 * empty array when the list fails the magic-number check.
 */
Value string_list_to_value(lsm_string_list * sl)
{
    std::vector < Value > out;

    if (LSM_IS_STRING_LIST(sl)) {
        uint32_t n = lsm_string_list_size(sl);
        out.reserve(n);
        for (uint32_t i = 0; i < n; ++i)
            out.push_back(Value(lsm_string_list_elem_get(sl, i)));
    }
    return Value(out);
}

lsm_access_group *value_to_access_group(Value & group)
{
    lsm_string_list *il = NULL;
    lsm_access_group *ag = NULL;

    if (is_expected_object(group, CLASS_NAME_ACCESS_GROUP)) {
        std::map < std::string, Value > vAg = group.asObject();

        il = value_to_string_list(vAg["init_ids"]);
        if (il) {
            ag = lsm_access_group_record_alloc(vAg["id"].asString().c_str(),
                                               vAg["name"].asString().c_str(),
                                               il,
                                               (lsm_access_group_init_type)
                                               vAg["init_type"].asInt32_t(),
                                               vAg["system_id"].
asString().c_str(), vAg["plugin_data"].asC_str()); } /* This stuff is copied in lsm_access_group_record_alloc */ lsm_string_list_free(il); } else { throw ValueException("value_to_access_group: Not correct type"); } return ag; } Value access_group_to_value(lsm_access_group * group) { if (LSM_IS_ACCESS_GROUP(group)) { std::map < std::string, Value > ag; ag["class"] = Value(CLASS_NAME_ACCESS_GROUP); ag["id"] = Value(group->id); ag["name"] = Value(group->name); ag["init_ids"] = Value(string_list_to_value(group->initiators)); ag["init_type"] = Value(group->init_type); ag["system_id"] = Value(group->system_id); ag["plugin_data"] = Value(group->plugin_data); return Value(ag); } return Value(); } int value_array_to_access_groups(Value & group, lsm_access_group ** ag_list[], uint32_t * count) { int rc = LSM_ERR_OK; try { std::vector < Value > ag = group.asArray(); *count = ag.size(); if (*count) { *ag_list = lsm_access_group_record_array_alloc(*count); if (*ag_list) { uint32_t i; for (i = 0; i < *count; ++i) { (*ag_list)[i] = value_to_access_group(ag[i]); if (!((*ag_list)[i])) { rc = LSM_ERR_NO_MEMORY; goto error; } } } else { rc = LSM_ERR_NO_MEMORY; } } } catch(const ValueException & ve) { rc = LSM_ERR_LIB_BUG; goto error; } out: return rc; error: if (*ag_list && *count) { lsm_access_group_record_array_free(*ag_list, *count); *ag_list = NULL; *count = 0; } goto out; } Value access_group_list_to_value(lsm_access_group ** group, uint32_t count) { std::vector < Value > rc; if (group && count) { uint32_t i; rc.reserve(count); for (i = 0; i < count; ++i) { rc.push_back(access_group_to_value(group[i])); } } return Value(rc); } lsm_block_range *value_to_block_range(Value & br) { lsm_block_range *rc = NULL; if (is_expected_object(br, CLASS_NAME_BLOCK_RANGE)) { std::map < std::string, Value > range = br.asObject(); rc = lsm_block_range_record_alloc(range["src_block"].asUint64_t(), range["dest_block"].asUint64_t(), range["block_count"].asUint64_t()); } else { throw 
ValueException("value_to_block_range: Not correct type"); } return rc; } Value block_range_to_value(lsm_block_range * br) { if (LSM_IS_BLOCK_RANGE(br)) { std::map < std::string, Value > r; r["class"] = Value(CLASS_NAME_BLOCK_RANGE); r["src_block"] = Value(br->source_start); r["dest_block"] = Value(br->dest_start); r["block_count"] = Value(br->block_count); return Value(r); } return Value(); } lsm_block_range **value_to_block_range_list(Value & brl, uint32_t * count) { lsm_block_range **rc = NULL; std::vector < Value > r = brl.asArray(); *count = r.size(); if (*count) { rc = lsm_block_range_record_array_alloc(*count); if (rc) { for (uint32_t i = 0; i < *count; ++i) { rc[i] = value_to_block_range(r[i]); if (!rc[i]) { lsm_block_range_record_array_free(rc, i); rc = NULL; break; } } } } return rc; } Value block_range_list_to_value(lsm_block_range ** brl, uint32_t count) { std::vector < Value > r; if (brl && count) { uint32_t i = 0; r.reserve(count); for (i = 0; i < count; ++i) { r.push_back(block_range_to_value(brl[i])); } } return Value(r); } lsm_fs *value_to_fs(Value & fs) { lsm_fs *rc = NULL; if (is_expected_object(fs, CLASS_NAME_FILE_SYSTEM)) { std::map < std::string, Value > f = fs.asObject(); rc = lsm_fs_record_alloc(f["id"].asString().c_str(), f["name"].asString().c_str(), f["total_space"].asUint64_t(), f["free_space"].asUint64_t(), f["pool_id"].asString().c_str(), f["system_id"].asString().c_str(), f["plugin_data"].asC_str()); } else { throw ValueException("value_to_fs: Not correct type"); } return rc; } Value fs_to_value(lsm_fs * fs) { if (LSM_IS_FS(fs)) { std::map < std::string, Value > f; f["class"] = Value(CLASS_NAME_FILE_SYSTEM); f["id"] = Value(fs->id); f["name"] = Value(fs->name); f["total_space"] = Value(fs->total_space); f["free_space"] = Value(fs->free_space); f["pool_id"] = Value(fs->pool_id); f["system_id"] = Value(fs->system_id); f["plugin_data"] = Value(fs->plugin_data); return Value(f); } return Value(); } lsm_fs_ss *value_to_ss(Value & ss) { 
lsm_fs_ss *rc = NULL; if (is_expected_object(ss, CLASS_NAME_FS_SNAPSHOT)) { std::map < std::string, Value > f = ss.asObject(); rc = lsm_fs_ss_record_alloc(f["id"].asString().c_str(), f["name"].asString().c_str(), f["ts"].asUint64_t(), f["plugin_data"].asC_str()); } else { throw ValueException("value_to_ss: Not correct type"); } return rc; } Value ss_to_value(lsm_fs_ss * ss) { if (LSM_IS_SS(ss)) { std::map < std::string, Value > f; f["class"] = Value(CLASS_NAME_FS_SNAPSHOT); f["id"] = Value(ss->id); f["name"] = Value(ss->name); f["ts"] = Value(ss->ts); f["plugin_data"] = Value(ss->plugin_data); return Value(f); } return Value(); } lsm_nfs_export *value_to_nfs_export(Value & exp) { lsm_nfs_export *rc = NULL; if (is_expected_object(exp, CLASS_NAME_FS_EXPORT)) { int ok = 0; lsm_string_list *root = NULL; lsm_string_list *rw = NULL; lsm_string_list *ro = NULL; std::map < std::string, Value > i = exp.asObject(); /* Check all the arrays for successful allocation */ root = value_to_string_list(i["root"]); if (root) { rw = value_to_string_list(i["rw"]); if (rw) { ro = value_to_string_list(i["ro"]); if (!ro) { lsm_string_list_free(rw); lsm_string_list_free(root); rw = NULL; root = NULL; } else { ok = 1; } } else { lsm_string_list_free(root); root = NULL; } } if (ok) { rc = lsm_nfs_export_record_alloc(i["id"].asC_str(), i["fs_id"].asC_str(), i["export_path"].asC_str(), i["auth"].asC_str(), root, rw, ro, i["anonuid"].asUint64_t(), i["anongid"].asUint64_t(), i["options"].asC_str(), i["plugin_data"].asC_str()); lsm_string_list_free(root); lsm_string_list_free(rw); lsm_string_list_free(ro); } } else { throw ValueException("value_to_nfs_export: Not correct type"); } return rc; } Value nfs_export_to_value(lsm_nfs_export * exp) { if (LSM_IS_NFS_EXPORT(exp)) { std::map < std::string, Value > f; f["class"] = Value(CLASS_NAME_FS_EXPORT); f["id"] = Value(exp->id); f["fs_id"] = Value(exp->fs_id); f["export_path"] = Value(exp->export_path); f["auth"] = Value(exp->auth_type); f["root"] = 
Value(string_list_to_value(exp->root)); f["rw"] = Value(string_list_to_value(exp->rw)); f["ro"] = Value(string_list_to_value(exp->ro)); f["anonuid"] = Value(exp->anonuid); f["anongid"] = Value(exp->anongid); f["options"] = Value(exp->options); f["plugin_data"] = Value(exp->plugin_data); return Value(f); } return Value(); } lsm_storage_capabilities *value_to_capabilities(Value & exp) { lsm_storage_capabilities *rc = NULL; if (is_expected_object(exp, CLASS_NAME_CAPABILITIES)) { const char *val = exp["cap"].asC_str(); rc = lsm_capability_record_alloc(val); } else { throw ValueException("value_to_capabilities: Not correct type"); } return rc; } Value capabilities_to_value(lsm_storage_capabilities * cap) { if (LSM_IS_CAPABILITIY(cap)) { std::map < std::string, Value > c; char *t = capability_string(cap); c["class"] = Value(CLASS_NAME_CAPABILITIES); c["cap"] = Value(t); free(t); return Value(c); } return Value(); } lsm_target_port *value_to_target_port(Value & tp) { lsm_target_port *rc = NULL; if (is_expected_object(tp, CLASS_NAME_TARGET_PORT)) { rc = lsm_target_port_record_alloc(tp["id"].asC_str(), (lsm_target_port_type) tp["port_type"].asInt32_t(), tp["service_address"].asC_str(), tp["network_address"].asC_str(), tp["physical_address"].asC_str(), tp["physical_name"].asC_str(), tp["system_id"].asC_str(), tp["plugin_data"].asC_str()); } else { throw ValueException("value_to_target_port: Not correct type"); } return rc; } Value target_port_to_value(lsm_target_port * tp) { if (LSM_IS_TARGET_PORT(tp)) { std::map < std::string, Value > p; p["class"] = Value(CLASS_NAME_TARGET_PORT); p["id"] = Value(tp->id); p["port_type"] = Value(tp->port_type); p["service_address"] = Value(tp->service_address); p["network_address"] = Value(tp->network_address); p["physical_address"] = Value(tp->physical_address); p["physical_name"] = Value(tp->physical_name); p["system_id"] = Value(tp->system_id); p["plugin_data"] = Value(tp->plugin_data); return Value(p); } return Value(); } int 
values_to_uint32_array(Value & value, uint32_t ** uint32_array, uint32_t * count) { int rc = LSM_ERR_OK; *count = 0; try { std::vector < Value > data = value.asArray(); *count = data.size(); if (*count) { *uint32_array = (uint32_t *) malloc(sizeof(uint32_t) * *count); if (*uint32_array) { uint32_t i; for (i = 0; i < *count; i++) { (*uint32_array)[i] = data[i].asUint32_t(); } } else { rc = LSM_ERR_NO_MEMORY; } } } catch(const ValueException & ve) { if (*count) { free(*uint32_array); *uint32_array = NULL; *count = 0; } rc = LSM_ERR_LIB_BUG; } return rc; } Value uint32_array_to_value(uint32_t * uint32_array, uint32_t count) { std::vector < Value > rc; if (uint32_array && count) { uint32_t i; rc.reserve(count); for (i = 0; i < count; i++) { rc.push_back(uint32_array[i]); } } return rc; } libstoragemgmt-1.2.3/c_binding/lsm_datatypes.hpp0000664000175000017500000003242712537737032016777 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . 
* * Author: tasleson */ #ifndef LSM_DATATYPES_H #define LSM_DATATYPES_H #include "libstoragemgmt/libstoragemgmt_plug_interface.h" #include "libstoragemgmt/libstoragemgmt_common.h" #include "libxml/uri.h" #include #include "lsm_ipc.hpp" #ifdef __cplusplus extern "C" { #endif /* Helper macros to ease getter construction *//* Implementation for generic getter */ #define MEMBER_FUNC_GET(return_type, name, param_sig, x, validation, member, error) \ return_type name( param_sig ) {\ if( validation(x) ) { \ return x->member; \ } else { \ return error; \ } \ } \ #define MAGIC_CHECK(obj, m) ((obj) && \ ((obj)->magic==(m) )) #define LSM_DEL_MAGIC(obj) ((obj & 0x0FFFFFFF) | 0xD0000000) #define LSM_VOL_MAGIC 0xAA7A0000 #define LSM_IS_VOL(obj) MAGIC_CHECK(obj, LSM_VOL_MAGIC) #define LSM_FLAG_UNUSED_CHECK(x) ( x != 0 ) #define LSM_FLAG_GET_VALUE(x) x["flags"].asUint64_t() #define LSM_FLAG_EXPECTED_TYPE(x) (Value::numeric_t == x["flags"].valueType()) /** * Information about storage volumes. */ struct LSM_DLL_LOCAL _lsm_volume { uint32_t magic; char *id; /**< System wide unique identifier */ char *name; /**< Human recognizeable name */ char *vpd83; /**< SCSI page 83 unique ID */ uint64_t block_size; /**< Block size */ uint64_t number_of_blocks; /**< Number of blocks */ uint32_t admin_state; /**< Status */ char *system_id; /**< System this volume belongs */ char *pool_id; /**< Pool this volume is derived from */ char *plugin_data; /**< Private data for plugin */ }; #define LSM_POOL_MAGIC 0xAA7A0001 #define LSM_IS_POOL(obj) MAGIC_CHECK(obj, LSM_POOL_MAGIC) /** * Information about storage pools. 
*/ struct LSM_DLL_LOCAL _lsm_pool { uint32_t magic; /**< Used for verfication */ char *id; /**< System wide unique identifier */ char *name; /**< Human recognizeable name */ uint64_t element_type; /**< What the pool can be used for */ uint64_t unsupported_actions; /**< What pool cannot be used for */ uint64_t total_space; /**< Total size */ uint64_t free_space; /**< Free space available */ uint64_t status; /**< Status of pool */ char *status_info; /**< Status info for pool */ char *system_id; /**< system id */ char *plugin_data; /**< Private data for plugin */ }; #define LSM_ACCESS_GROUP_MAGIC 0xAA7A0003 #define LSM_IS_ACCESS_GROUP(obj) MAGIC_CHECK(obj, LSM_ACCESS_GROUP_MAGIC) /** * Information pertaining to a storage group. */ struct _lsm_access_group { uint32_t magic; /**< Used for verification */ char *id; /**< Id */ char *name; /**< Name */ char *system_id; /**< System id */ lsm_access_group_init_type init_type; /**< Init type */ lsm_string_list *initiators; /**< List of initiators */ char *plugin_data; /**< Reserved for the plugin to use */ }; #define LSM_NFS_EXPORT_MAGIC 0xAA7A0006 #define LSM_IS_NFS_EXPORT(obj) MAGIC_CHECK(obj, LSM_NFS_EXPORT_MAGIC) /** * Structure for NFS export information */ struct _lsm_nfs_export { uint32_t magic; /**< Used for verfication */ char *id; /**< Id */ char *fs_id; /**< File system id */ char *export_path; /**< Export path */ char *auth_type; /**< Supported authentication types */ lsm_string_list *root; /**< List of hosts with root access */ lsm_string_list *rw; /**< List of hosts with read & write access */ lsm_string_list *ro; /**< List of hosts with read only access */ uint64_t anonuid; /**< Uid that should map to anonymous */ uint64_t anongid; /**< Gid that should map to anonymous */ char *options; /**< Options */ char *plugin_data; /**< Reserved for the plugin to use */ }; #define LSM_BLOCK_RANGE_MAGIC 0xAA7A0007 #define LSM_IS_BLOCK_RANGE(obj) MAGIC_CHECK(obj, LSM_BLOCK_RANGE_MAGIC) /** * Structure for block range ( a 
region to be replicated ) */ struct _lsm_block_range { uint32_t magic; /**< Used for verification */ uint64_t source_start; /**< Source address */ uint64_t dest_start; /**< Dest address */ uint64_t block_count; /**< Number of blocks */ }; #define LSM_CAPABILITIES_MAGIC 0xAA7A0008 #define LSM_IS_CAPABILITIY(obj) MAGIC_CHECK(obj, LSM_CAPABILITIES_MAGIC) #define LSM_CAP_MAX 512 /** * Capabilities of the plug-in and storage array. */ struct _lsm_storage_capabilities { uint32_t magic; /**< Used for verification */ uint32_t len; /**< Len of cap field */ uint8_t *cap; /**< Capacity data */ }; #define LSM_SYSTEM_MAGIC 0xAA7A0009 #define LSM_IS_SYSTEM(obj) MAGIC_CHECK(obj, LSM_SYSTEM_MAGIC) /** * Structure for a system */ struct _lsm_system { uint32_t magic; /**< Used for verification */ char *id; /**< Id */ char *name; /**< Name */ uint32_t status; /**< Enumerated status value */ char *status_info; /**< System status text */ char *plugin_data; /**< Reserved for the plugin to use */ }; #define LSM_CONNECT_MAGIC 0xAA7A000A #define LSM_IS_CONNECT(obj) MAGIC_CHECK(obj, LSM_CONNECT_MAGIC) #define LSM_PLUGIN_MAGIC 0xAA7A000B #define LSM_IS_PLUGIN(obj) MAGIC_CHECK(obj, LSM_PLUGIN_MAGIC) /** * Information pertaining to the plug-in specifics. 
*/
struct LSM_DLL_LOCAL _lsm_plugin {
    uint32_t magic;                   /**< Magic, used for structure validation */
    Ipc *tp;                          /**< IPC transport */
    char *desc;                       /**< Description */
    char *version;                    /**< Version */
    void *private_data;               /**< Private data for plug-in */
    lsm_error *error;                 /**< Error information */
    lsm_plugin_register reg;          /**< Plug-in registration */
    lsm_plugin_unregister unreg;      /**< Plug-in unregistration */
    struct lsm_mgmt_ops_v1 *mgmt_ops; /**< Callback for management ops */
    struct lsm_san_ops_v1 *san_ops;   /**< Callbacks for SAN ops */
    struct lsm_nas_ops_v1 *nas_ops;   /**< Callbacks for NAS ops */
    struct lsm_fs_ops_v1 *fs_ops;     /**< Callbacks for fs ops */
    struct lsm_ops_v1_2 *ops_v1_2;    /**< Callbacks for v1.2 ops */
};

/**
 * Information pertaining to the connection.  This is the main structure and
 * opaque data type for the library.
 */
struct LSM_DLL_LOCAL _lsm_connect {
    uint32_t magic;   /**< Magic, used for structure validation */
    uint32_t flags;   /**< Flags for the connection */
    xmlURIPtr uri;    /**< URI */
    char *raw_uri;    /**< Raw URI string */
    lsm_error *error; /**< Error information */
    Ipc *tp;          /**< IPC transport */
};

#define LSM_ERROR_MAGIC 0xAA7A000C
#define LSM_IS_ERROR(obj) MAGIC_CHECK(obj, LSM_ERROR_MAGIC)

/**
 * Used to house error information.
 */
struct LSM_DLL_LOCAL _lsm_error {
    uint32_t magic;           /**< Magic, used for struct validation */
    lsm_error_number code;    /**< Error code */
    uint32_t reserved;        /**< Reserved */
    char *message;            /**< Human readable error message */
    char *exception;          /**< Exception message if present */
    char *debug;              /**< Debug message */
    void *debug_data;         /**< Debug data */
    uint32_t debug_data_size; /**< Size of the data */
};

/**
 * Used to house string collection.
 */
#define LSM_STRING_LIST_MAGIC 0xAA7A000D
#define LSM_IS_STRING_LIST(obj) MAGIC_CHECK(obj, LSM_STRING_LIST_MAGIC)
struct LSM_DLL_LOCAL _lsm_string_list {
    uint32_t magic;    /**< Magic value */
    GPtrArray *values; /* Backing array of owned C strings */
};

/**
 * Structure for File system information.
 */
#define LSM_FS_MAGIC 0xAA7A000E
#define LSM_IS_FS(obj) MAGIC_CHECK(obj, LSM_FS_MAGIC)
struct LSM_DLL_LOCAL _lsm_fs {
    uint32_t magic;       /**< Magic, used for struct validation */
    char *id;             /**< Id */
    char *name;           /**< Name */
    char *pool_id;        /**< Pool ID */
    uint64_t total_space; /**< Total space */
    uint64_t free_space;  /**< Free space */
    char *system_id;      /**< System ID */
    char *plugin_data;    /**< Plugin private data */
};

#define LSM_SS_MAGIC 0xAA7A000F
#define LSM_IS_SS(obj) MAGIC_CHECK(obj, LSM_SS_MAGIC)
/* File system snapshot record */
struct LSM_DLL_LOCAL _lsm_fs_ss {
    uint32_t magic;    /* Magic, used for struct validation */
    char *id;          /* Id */
    char *name;        /* Name */
    uint64_t ts;       /* Timestamp */
    char *plugin_data; /**< Reserved for the plugin to use */
};

#define LSM_DISK_MAGIC 0xAA7A0010
#define LSM_IS_DISK(obj) MAGIC_CHECK(obj, LSM_DISK_MAGIC)
/* Disk record */
struct LSM_DLL_LOCAL _lsm_disk {
    uint32_t magic;          /* Magic, used for struct validation */
    char *id;                /* Id */
    char *name;              /* Name */
    lsm_disk_type disk_type; /* Disk type */
    uint64_t block_size;     /* Block size */
    uint64_t block_count;    /* Number of blocks */
    uint64_t disk_status;    /* Bit field */
    char *system_id;         /* System id */
};

#define LSM_HASH_MAGIC 0xAA7A0011
#define LSM_IS_HASH(obj) MAGIC_CHECK(obj, LSM_HASH_MAGIC)
/* Simple string key/value collection */
struct LSM_DLL_LOCAL _lsm_hash {
    uint32_t magic;   /* Magic, used for struct validation */
    GHashTable *data; /* Backing hash table */
};

#define LSM_TARGET_PORT_MAGIC 0xAA7A0012
#define LSM_IS_TARGET_PORT(obj) MAGIC_CHECK(obj, LSM_TARGET_PORT_MAGIC)
/* Target port record */
struct LSM_DLL_LOCAL _lsm_target_port {
    uint32_t magic;                 /* Magic, used for struct validation */
    char *id;                       /* Id */
    lsm_target_port_type port_type; /* Port type */
    char *service_address;          /* Service address */
    char *network_address;          /* Network address */
    char *physical_address;         /* Physical address */
    char *physical_name;            /* Physical name */
    char *system_id;                /* System id */
    char *plugin_data;              /* Reserved for the plugin to use */
};

/**
 * Returns a pointer to a newly created connection structure.
 * @return NULL on memory exhaustion, else new connection.
 */
lsm_connect LSM_DLL_LOCAL *connection_get();

/**
 * De-allocates the connection.
 * @param c Connection to free.
 */
void LSM_DLL_LOCAL connection_free(lsm_connect * c);

/**
 * Loads the requester driver specified in the uri.
 * @param c Connection
 * @param plugin Short name of plugin
 * @param password Password
 * @param timeout Initial timeout
 * @param e Error data
 * @param startup If non zero call rpc start_up, else skip
 * @param flags Reserved flag for future use
 * @return LSM_ERR_OK on success, else error code.
 */
int LSM_DLL_LOCAL driver_load(lsm_connect * c, const char *plugin,
                              const char *password, uint32_t timeout,
                              lsm_error_ptr * e, int startup, lsm_flag flags);

/* Serializes capabilities to a string; caller owns the returned buffer --
 * NOTE(review): exact format defined by the implementation elsewhere. */
char LSM_DLL_LOCAL *capability_string(lsm_storage_capabilities * c);

/* Returns the unix domain socket directory used for plugin IPC. */
const char LSM_DLL_LOCAL *uds_path(void);

/**
 * Take a character string and tries to convert to a number.
 * Note: Number is defined as what is acceptable for JSON number.  The number
 * is represented by int64_t if possible, else uint64_t and then long double.
 * @param str_num Character string containing number
 * @param si Signed 64 bit number
 * @param ui Unsigned 64 bit number
 * @param d Long double
 * @return -1 = Invalid string pointer
 *          0 = Not a number
 *          1 = Number converted to signed integer, value in si
 *          2 = Number converted to unsigned integer, value in ui
 *          3 = Number converted to long double, value in d
 */
int LSM_DLL_LOCAL number_convert(const char *str_num, int64_t * si,
                                 uint64_t * ui, long double *d);

/**
 * Validates an iSCSI IQN
 * @param iqn iSCSI iqn to check
 * @return LSM_ERR_OK on success, else LSM_ERR_INVALID_ARGUMENT
 */
int LSM_DLL_LOCAL iqn_validate(const char *iqn);

/**
 * Validates an WWPN
 * @param wwpn wwpn to check
 * @return LSM_ERR_OK on success, else LSM_ERR_INVALID_ARGUMENT
 */
int LSM_DLL_LOCAL wwpn_validate(const char *wwpn);

/**
 * Given a WWPN validate it and then convert to internal representation.
* @param wwpn World wide port name to validate
* @return NULL if not valid, else string with common lsm format
*/
char LSM_DLL_LOCAL *wwpn_convert(const char *wwpn);

#ifdef __cplusplus
}
#endif
#endif                          /* LSM_DATATYPES_H */
libstoragemgmt-1.2.3/c_binding/lsm_ipc.hpp0000664000175000017500000002602612537737032015552 00000000000000/*
 * Copyright (C) 2011-2014 Red Hat, Inc.
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: tasleson
 */

#ifndef LSM_IPC_H
#define LSM_IPC_H

#include "libstoragemgmt/libstoragemgmt_common.h"
/* NOTE(review): the targets of the following #include directives were lost
 * in this copy of the file (angle-bracket header names stripped); restore
 * from upstream before building. */
#include
#include
#include
#include
#include
#include
#include
#include

//Common serialization

/**
 * Sends and receives payloads, unaware of the contents.
 * Notes: Not thread safe. i.e. you cannot share the same object with two or
 * more threads.
 */
class LSM_DLL_LOCAL Transport {
  public:
    /**
     * Size of the header which immediately proceeds the payload.
     */
    const static int HDR_LEN = 10;

    /**
     * Empty ctor.
     * @return
     */
    Transport();

    /**
     * Class ctor
     * @param socket_desc Connected socket descriptor.
     */
    Transport(int socket_desc);

    /**
     * Class dtor
     */
    ~Transport();

    /**
     * Sends a message over the transport.
     * @param[in] msg The message to be sent.
     * @param[out] error_code Errno (only valid if we return -1)
     * @return 0 on success, else -1
     */
    int msg_send(const std::string & msg, int &error_code);

    /**
     * Received a message over the transport.
     * Note: A zero read indicates that the transport was closed by other
     * side, no error code will be set in that case.
     * @param error_code (0 on success, else errno)
     * @return Message on success else 0 size with error_code set (not if EOF)
     */
    std::string msg_recv(int &error_code);

    /**
     * Creates a connected socket (AF_UNIX) to the specified path
     * @param path of the AF_UNIX file to be used for IPC
     * @param error_code Error reason for the failure (errno)
     * @return -1 on error, else connected socket.
     */
    static int socket_get(const std::string & path, int &error_code);

    /**
     * Closes the transport, called in the destructor if not done in advance.
     * @return 0 on success, else EBADF, EINTR, EIO.
     */
    int close();

  private:
    int s;                      //Socket descriptor
};

/**
 * Generic function to convert Type v into a string.
 * @param v Template type T
 * @return string representation
 */
template < class Type > static std::string to_string(Type v) {
    std::stringstream out;
    out << v;
    return out.str();
}

/**
 * Class that represents an EOF condition
 * @param m Message
 */
class LSM_DLL_LOCAL EOFException:public std::runtime_error {
  public:
    EOFException(std::string m);
};

/**
 * User defined class for Value errors during serialize / de-serialize.
 */
class LSM_DLL_LOCAL ValueException:public std::runtime_error {
  public:
    /**
     * Constructor
     * @param m Exception message
     */
    ValueException(std::string m);
};

/**
 * User defined class for errors
 */
class LSM_DLL_LOCAL LsmException:public std::runtime_error {
  public:
    /**
     * Constructor
     * @param code Error code
     * @param msg Error message
     */
    LsmException(int code, std::string & msg);

    /**
     * Constructor
     * @param code Error code
     * @param msg Error message
     * @param debug_addl Additional debug data
     */
    LsmException(int code, std::string & msg, const std::string & debug_addl);

    /**
     * Constructor
     * @param code Error code
     * @param msg Error message
     * @param debug_addl Additional debug
     * @param debug_data_addl Additional debug data
     */
    LsmException(int code, std::string & msg, const std::string & debug_addl,
                 const std::string & debug_data_addl);

    /**
     * Destructor
     */
    ~LsmException() throw();

    int error_code;             /* Numeric error code */
    std::string debug;          /* Debug message */
    std::string debug_data;     /* Additional debug payload */
};

/**
 * Represents a value in the serialization.
 */
class LSM_DLL_LOCAL Value {
  public:
    /**
     * Different types this class can hold.
     */
    enum value_type { null_t, boolean_t, string_t, numeric_t, object_t,
        array_t
    };

    /**
     * Default constructor creates a "null" type
     */
    Value(void);

    /**
     * Boolean constructor
     * @param v value
     */
    Value(bool v);

    /**
     * Numeric double constructor.
     * @param v value
     */
    Value(double v);
    Value(long double v);

    /**
     * Numeric unsigned 32 constructor
     * @param v value
     */
    Value(uint32_t v);

    /**
     * Numeric signed 32 constructor
     * @param v value
     */
    Value(int32_t v);

    /**
     * Numeric unsigned 64 constructor.
     * @param v value
     */
    Value(uint64_t v);

    /**
     * Numeric signed 64 constructor.
     * @param v value
     */
    Value(int64_t v);

    /**
     * Constructor in which you specify type and initial value as string.
     * @param type Type this object will hold.
     * @param v value
     */
    Value(value_type type, const std::string & v);

    /**
     * Constructor for char * i.e. string.
     * @param v value
     */
    Value(const char *v);

    /**
     * Constructor for std::string
     * @param v value
     */
    Value(const std::string & v);

    /**
     * Constructor for object type
     * @param v values
     */
    Value(const std::map < std::string, Value > &v);

    /**
     * Constructor for array type
     * @param v array values
     */
    Value(const std::vector < Value > &v);

    /**
     * Serialize Value to json
     * @return
     */
    std::string serialize(void);

    /**
     * Returns the enumerated type represented by object
     * @return enumerated type
     */
    value_type valueType() const;

    /**
     * Overloaded operator for map access
     * @param key
     * @return Value
     */
    Value & operator[] (const std::string & key);

    /**
     * Overloaded operator for vector(array) access
     * @param i
     * @return Value
     */
    Value & operator[] (uint32_t i);

    /**
     * Returns true if value has a key in key/value pair
     * @return true if key exists, else false.
     */
    bool hasKey(const std::string & k);

    /**
     * Checks to see if a Value contains a valid request
     * @return True if it is a request, else false
     */
    bool isValidRequest(void);

    /**
     * Given a key returns the value.
     * @param key
     * @return Value
     */
    Value getValue(const char *key);

    /**
     * Returns a numeric as the string holding it.
     */
    const char *asNumString();

    /**
     * Returns NULL if void type, else ValueException
     * @return NULL
     */
    void *asVoid();

    /**
     * Boolean value represented by object.
     * @return true, false ValueException on error
     */
    bool asBool();

    /**
     * Double value represented by object.
     * @return double value else ValueException on error
     */
    double asDouble();
    long double asLongDouble();

    /**
     * Signed 32 integer value represented by object.
     * @return integer value else ValueException on error
     */
    int32_t asInt32_t();

    /**
     * Signed 64 integer value represented by object.
     * @return integer value else ValueException on error
     */
    int64_t asInt64_t();

    /**
     * Unsigned 32 integer value represented by object.
     * @return integer value else ValueException on error
     */
    uint32_t asUint32_t();

    /**
     * Unsigned 64 integer value represented by object.
     * @return integer value else ValueException on error
     */
    uint64_t asUint64_t();

    /**
     * String value represented by object.
     * @return string value else ValueException on error
     */
    std::string asString();

    /**
     * Return string as a pointer to a character array
     * @return
     */
    const char *asC_str();

    /**
     * key/value represented by object.
     * @return map of key and values else ValueException on error
     */
    std::map < std::string, Value > asObject();

    /**
     * vector of values represented by object.
     * @return vector of array values else ValueException on error
     */
    std::vector < Value > asArray();

  private:
    value_type t;                        /* Which variant is active */
    std::string s;                       /* String/numeric storage */
    std::map < std::string, Value > obj; /* Object storage */
    std::vector < Value > array;         /* Array storage */

    void marshal(yajl_gen g);
};

/**
 * Serialize, de-serialize methods.
 */
class LSM_DLL_LOCAL Payload {
  public:
    /**
     * Given a Value returns json representation.
     * @param v Value to serialize
     * @return String representation
     */
    static std::string serialize(Value & v);

    /**
     * Given a json string return a Value
     * @param json String to de-serialize
     * @return Value
     */
    static Value deserialize(const std::string & json);
};

/* Request/response style IPC helper layered on a Transport. */
class LSM_DLL_LOCAL Ipc {
  public:
    /**
     * Constructor
     */
    Ipc();

    /**
     * Constructor that takes a file descriptor
     * @param fd File descriptor to use
     */
    Ipc(int fd);

    /**
     * Constructor that takes a socket path
     * @param socket_path Unix domain socket
     */
    Ipc(std::string socket_path);

    /**
     * Destructor
     */
    ~Ipc();

    /**
     * Send a request over IPC
     * @param request IPC function name
     * @param params Parameters
     * @param id Request ID
     */
    void requestSend(const std::string request, const Value & params,
                     int32_t id = 100);

    /**
     * Reads a request
     * @returns Value
     */
    Value readRequest(void);

    /**
     * Send a response to a request
     * @param response Response value
     * @param id Id that matches request
     */
    void responseSend(const Value & response, uint32_t id = 100);

    /**
     * Read a response
     * @return Value of response
     */
    Value responseRead();

    /**
     * Send an error
     * @param error_code Error code
     * @param msg Error message
     * @param debug Debug data
     * @param id Id that matches request
     */
    void errorSend(int error_code, std::string msg, std::string debug,
                   uint32_t id = 100);

    /**
     * Do a remote procedure call (Request with a returned response
     * @param request Function method
     * @param params Function parameters
     * @param id Id of request
     * @return Result of the operation.
     */
    Value rpc(const std::string & request, const Value & params,
              int32_t id = 100);

  private:
    Transport t;                /* Underlying transport */
};

#endif
libstoragemgmt-1.2.3/c_binding/lsm_datatypes.cpp0000664000175000017500000015633112537737032016773 00000000000000/*
 * Copyright (C) 2011-2014 Red Hat, Inc.
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
*
* Author: tasleson
*/

#ifndef __cplusplus
#define _GNU_SOURCE
#endif

/* NOTE(review): the target of this bare #include (and the six below) was
 * lost in this copy of the file; restore from upstream before building. */
#include
#include "lsm_datatypes.hpp"
#include "libstoragemgmt/libstoragemgmt_accessgroups.h"
#include "libstoragemgmt/libstoragemgmt_common.h"
#include "libstoragemgmt/libstoragemgmt_disk.h"
#include "libstoragemgmt/libstoragemgmt_error.h"
#include "libstoragemgmt/libstoragemgmt_fs.h"
#include "libstoragemgmt/libstoragemgmt_nfsexport.h"
#include "libstoragemgmt/libstoragemgmt_plug_interface.h"
#include "libstoragemgmt/libstoragemgmt_pool.h"
#include "libstoragemgmt/libstoragemgmt_snapshot.h"
#include "libstoragemgmt/libstoragemgmt_systems.h"
#include "libstoragemgmt/libstoragemgmt_targetport.h"
#include "libstoragemgmt/libstoragemgmt_types.h"
#include "libstoragemgmt/libstoragemgmt_volumes.h"
#include
#include
#include
#include
#include
#include

#ifdef __cplusplus
extern "C" {
#endif

#define LSM_DEFAULT_PLUGIN_DIR "/var/run/lsm/ipc"

/* We would certainly expand this to encompass the entire function */
#define MEMBER_GET(x, validation, member, error) \
    if( validation(x) ) { \
        return x->member; \
    } else { \
        return error; \
    }

/* Appends a copy of value to the list.  Returns LSM_ERR_OK,
 * LSM_ERR_NO_MEMORY on strdup failure, or LSM_ERR_INVALID_ARGUMENT
 * when sl is not a valid string list. */
int lsm_string_list_append(lsm_string_list * sl, const char *value)
{
    int rc = LSM_ERR_INVALID_ARGUMENT;

    if (LSM_IS_STRING_LIST(sl)) {
        char *d = strdup(value);
        if (d) {
            g_ptr_array_add(sl->values, d);
            rc = LSM_ERR_OK;
        } else {
            rc = LSM_ERR_NO_MEMORY;
        }
    }
    return rc;
}

/* Removes the element at index; the array free func releases the string. */
int lsm_string_list_delete(lsm_string_list * sl, uint32_t index)
{
    int rc = LSM_ERR_INVALID_ARGUMENT;

    if (LSM_IS_STRING_LIST(sl)) {
        if (index < sl->values->len) {
            g_ptr_array_remove_index(sl->values, index);
            rc = LSM_ERR_OK;
        }
    }
    return rc;
}

/* Replaces the element at index with a copy of value; grows the list with
 * NULL slots when index is beyond the current length. */
int lsm_string_list_elem_set(lsm_string_list * sl, uint32_t index,
                             const char *value)
{
    int rc = LSM_ERR_OK;
    if (LSM_IS_STRING_LIST(sl)) {
        if (index < sl->values->len) {
            char *i = (char *) g_ptr_array_index(sl->values, index);
            if (i) {
                free(i);
            }

            g_ptr_array_index(sl->values, index) = strdup(value);
            if (!g_ptr_array_index(sl->values, index)) {
                rc = LSM_ERR_NO_MEMORY;
            }
        } else
{
            /* Index past the end: grow the array, then set the new slot. */
            g_ptr_array_set_size(sl->values, index + 1);
            g_ptr_array_index(sl->values, index) = strdup(value);
            if (!g_ptr_array_index(sl->values, index)) {
                rc = LSM_ERR_NO_MEMORY;
            }
        }
    } else {
        rc = LSM_ERR_INVALID_ARGUMENT;
    }
    return rc;
}

/* Returns a borrowed pointer to the element at index, NULL when sl or
 * index is invalid. */
const char *lsm_string_list_elem_get(lsm_string_list * sl, uint32_t index)
{
    if (LSM_IS_STRING_LIST(sl)) {
        if (index < sl->values->len) {
            return (const char *) g_ptr_array_index(sl->values, index);
        }
    }
    return NULL;
}

/* Allocates a string list pre-sized to 'size' NULL entries; elements are
 * freed automatically via free() when removed. */
lsm_string_list *lsm_string_list_alloc(uint32_t size)
{
    lsm_string_list *rc = NULL;

    rc = (lsm_string_list *) malloc(sizeof(lsm_string_list));
    if (rc) {
        rc->magic = LSM_STRING_LIST_MAGIC;
        rc->values = g_ptr_array_sized_new(size);
        if (!rc->values) {
            rc->magic = LSM_DEL_MAGIC(LSM_STRING_LIST_MAGIC);
            free(rc);
            rc = NULL;
        } else {
            g_ptr_array_set_size(rc->values, size);
            g_ptr_array_set_free_func(rc->values, free);
        }
    }
    return rc;
}

/* Frees the list and all owned strings; invalidates the magic first so
 * stale pointers fail validation. */
int lsm_string_list_free(lsm_string_list * sl)
{
    if (LSM_IS_STRING_LIST(sl)) {
        sl->magic = LSM_DEL_MAGIC(LSM_STRING_LIST_MAGIC);
        g_ptr_array_free(sl->values, TRUE);
        sl->values = NULL;
        free(sl);
        return LSM_ERR_OK;
    }
    return LSM_ERR_INVALID_ARGUMENT;
}

/* Number of elements, 0 when sl is invalid. */
uint32_t lsm_string_list_size(lsm_string_list * sl)
{
    if (LSM_IS_STRING_LIST(sl)) {
        return (uint32_t) sl->values->len;
    }
    return 0;
}

/* Deep copy; returns NULL on invalid source or allocation failure. */
lsm_string_list *lsm_string_list_copy(lsm_string_list * src)
{
    lsm_string_list *dest = NULL;

    if (LSM_IS_STRING_LIST(src)) {
        uint32_t size = lsm_string_list_size(src);
        dest = lsm_string_list_alloc(size);

        if (dest) {
            uint32_t i;

            for (i = 0; i < size; ++i) {
                if (LSM_ERR_OK !=
                    lsm_string_list_elem_set(dest, i,
                                             lsm_string_list_elem_get
                                             (src, i))) {
                    /** We had an allocation failure setting an element item */
                    lsm_string_list_free(dest);
                    dest = NULL;
                    break;
                }
            }
        }
    }
    return dest;
}

/* Allocates a zeroed connection record with a valid magic. */
lsm_connect *connection_get()
{
    lsm_connect *c = (lsm_connect *) calloc(1, sizeof(lsm_connect));
    if (c) {
        c->magic = LSM_CONNECT_MAGIC;
    }
    return c;
}

/* Frees the connection and every owned resource (uri, error, transport,
 * raw uri string). */
void connection_free(lsm_connect * c)
{
    if (LSM_IS_CONNECT(c)) {
        c->magic = LSM_DEL_MAGIC(LSM_CONNECT_MAGIC);
c->flags = 0;

        if (c->uri) {
            xmlFreeURI(c->uri);
            c->uri = NULL;
        }

        if (c->error) {
            lsm_error_free(c->error);
            c->error = NULL;
        }

        if (c->tp) {
            delete(c->tp);
            c->tp = NULL;
        }

        if (c->raw_uri) {
            free(c->raw_uri);
            c->raw_uri = NULL;
        }

        free(c);
    }
}

/* Performs the "plugin_register" RPC handshake with the connected plugin.
 * On failure fills *e and returns a transport error code. */
static int connection_establish(lsm_connect * c, const char *password,
                                uint32_t timeout, lsm_error_ptr * e,
                                lsm_flag flags)
{
    int rc = LSM_ERR_OK;
    std::map < std::string, Value > params;

    try {
        params["uri"] = Value(c->raw_uri);

        if (password) {
            params["password"] = Value(password);
        } else {
            params["password"] = Value();
        }

        params["timeout"] = Value(timeout);
        params["flags"] = Value(flags);
        Value p(params);
        c->tp->rpc("plugin_register", p);
    } catch(const ValueException & ve) {
        *e = lsm_error_create(LSM_ERR_TRANSPORT_SERIALIZATION,
                              "Error in serialization", ve.what(), NULL,
                              NULL, 0);
        rc = LSM_ERR_TRANSPORT_SERIALIZATION;
    } catch(const LsmException & le) {
        *e = lsm_error_create(LSM_ERR_TRANSPORT_COMMUNICATION,
                              "Error in communication", le.what(), NULL,
                              NULL, 0);
        rc = LSM_ERR_TRANSPORT_COMMUNICATION;
    } catch( ...) {
        /* Catch-all: never let an exception escape into C callers. */
        *e = lsm_error_create(LSM_ERR_LIB_BUG, "Undefined exception",
                              NULL, NULL, NULL, 0);
        rc = LSM_ERR_LIB_BUG;
    }
    return rc;
}

/* Returns the plugin socket directory: $LSM_UDS_PATH if set, else the
 * built-in default. */
const char *uds_path(void)
{
    const char *plugin_dir = getenv("LSM_UDS_PATH");

    if (plugin_dir == NULL) {
        plugin_dir = LSM_DEFAULT_PLUGIN_DIR;
    }
    return plugin_dir;
}

/* Connects to the plugin's unix domain socket ("<uds_path>/<plugin_name>")
 * and, when startup is non-zero, performs the register handshake. */
int driver_load(lsm_connect * c, const char *plugin_name,
                const char *password, uint32_t timeout, lsm_error_ptr * e,
                int startup, lsm_flag flags)
{
    int rc = LSM_ERR_OK;
    char *plugin_file = NULL;
    const char *plugin_dir = uds_path();

    if (asprintf(&plugin_file, "%s/%s", plugin_dir, plugin_name) == -1) {
        return LSM_ERR_NO_MEMORY;
    }

    if (access(plugin_file, F_OK) != 0) {
        rc = LSM_ERR_PLUGIN_NOT_EXIST;
    } else {
        /* Socket must be read/write accessible to talk to the plugin. */
        if (access(plugin_file, R_OK | W_OK) == 0) {
            int ec;
            int sd = Transport::socket_get(std::string(plugin_file), ec);

            if (sd >= 0) {
                c->tp = new Ipc(sd);
                if (startup) {
                    if (connection_establish(c, password, timeout, e, flags)) {
                        rc = LSM_ERR_PLUGIN_IPC_FAIL;
                    }
                }
            } else {
                *e = lsm_error_create(LSM_ERR_PLUGIN_IPC_FAIL,
                                      "Unable to connect to plugin", NULL,
                                      dlerror(), NULL, 0);
                rc = LSM_ERR_PLUGIN_IPC_FAIL;
            }
        } else {
            *e = lsm_error_create(LSM_ERR_PLUGIN_SOCKET_PERMISSION,
                                  "Unable to access plugin", NULL, NULL,
                                  NULL, 0);
            rc = LSM_ERR_PLUGIN_SOCKET_PERMISSION;
        }
    }

    free(plugin_file);
    return rc;
}

/* Allocates and populates an error record.  strdup failures for the text
 * fields are tolerated (the field simply stays NULL). */
lsm_error_ptr lsm_error_create(lsm_error_number code, const char *msg,
                               const char *exception, const char *debug,
                               const void *debug_data,
                               uint32_t debug_data_size)
{
    lsm_error_ptr err = (lsm_error_ptr) calloc(1, sizeof(lsm_error));
    if (err) {
        err->magic = LSM_ERROR_MAGIC;
        err->code = code;

        /* Any of these strdup calls could fail, but we will continue */
        if (msg) {
            err->message = strdup(msg);
        }

        if (exception) {
            err->exception = strdup(exception);
        }

        if (debug) {
            err->debug = strdup(debug);
        }

        /* We are not going to fail the creation of the error if we cannot
         * allocate the storage for the debug data.
*/
        if (debug_data && (debug_data_size > 0)) {
            err->debug_data = malloc(debug_data_size);

            if (err->debug_data) {
                err->debug_data_size = debug_data_size;
                memcpy(err->debug_data, debug_data, debug_data_size);
            }
        }
    }
    return (lsm_error_ptr) err;
}

/* Frees the error record and all owned strings/data; invalidates magic. */
int lsm_error_free(lsm_error_ptr e)
{
    if (!LSM_IS_ERROR(e)) {
        return LSM_ERR_INVALID_ARGUMENT;
    }

    if (e->debug_data) {
        free(e->debug_data);
        e->debug_data = NULL;
        e->debug_data_size = 0;
    }

    if (e->debug) {
        free(e->debug);
        e->debug = NULL;
    }

    if (e->exception) {
        free(e->exception);
        e->exception = NULL;
    }

    if (e->message) {
        free(e->message);
        e->message = NULL;
    }

    e->magic = LSM_DEL_MAGIC(LSM_ERROR_MAGIC);
    free(e);

    return LSM_ERR_OK;
}

/* Returns member x of a valid error, else (type_t)error. */
#define LSM_RETURN_ERR_VAL(type_t, e, x, error) \
    if( LSM_IS_ERROR(e) ) { \
        return e->x; \
    } \
    return (type_t)error; \

lsm_error_number lsm_error_number_get(lsm_error_ptr e)
{
    LSM_RETURN_ERR_VAL(lsm_error_number, e, code, -1);
}

char *lsm_error_message_get(lsm_error_ptr e)
{
    LSM_RETURN_ERR_VAL(char *, e, message, NULL);
}

char *lsm_error_exception_get(lsm_error_ptr e)
{
    LSM_RETURN_ERR_VAL(char *, e, exception, NULL);
}

char *lsm_error_debug_get(lsm_error_ptr e)
{
    LSM_RETURN_ERR_VAL(char *, e, debug, NULL);
}

/* Returns the debug data pointer and stores its size in *size; NULL (and
 * *size == 0) when absent. */
void *lsm_error_debug_data_get(lsm_error_ptr e, uint32_t * size)
{
    if (LSM_IS_ERROR(e) && size != NULL) {
        if (e->debug_data) {
            *size = e->debug_data_size;
            return e->debug_data;
        } else {
            *size = 0;
        }
    }
    return NULL;
}

/**
 * When creating arrays of the different types the code is the same.  This
 * macro is used to create type safe code.
 * @param name Name of the function
 * @param rtype return type
 * @return An array of pointers of rtype
 */
#define CREATE_ALLOC_ARRAY_FUNC(name, rtype)\
rtype *name(uint32_t size) \
{ \
    rtype *rc = NULL; \
    if (size > 0) { \
        rc = (rtype *) calloc(size, sizeof(rtype)); \
    } \
    return rc; \
}

/**
 * Common macro for freeing the memory associated with one of these
 * data structures.
 * @param name Name of function to create
 * @param free_func Function to call to free one of the elements
 * @param record_type Type to record
 * @param error Value to return on error
 * @return None
 */
#define CREATE_FREE_ARRAY_FUNC(name, free_func, record_type, error)\
int name( record_type pa[], uint32_t size) \
{ \
    if (pa) { \
        uint32_t i = 0; \
        for (i = 0; i < size; ++i) { \
            free_func(pa[i]); \
        } \
        free(pa); \
        return LSM_ERR_OK; \
    } \
    return error; \
}

CREATE_ALLOC_ARRAY_FUNC(lsm_pool_record_array_alloc, lsm_pool *)

/* Allocates and populates a pool record; returns NULL when any required
 * string duplication fails. */
lsm_pool *lsm_pool_record_alloc(const char *id, const char *name,
                                uint64_t element_type,
                                uint64_t unsupported_actions,
                                uint64_t totalSpace, uint64_t freeSpace,
                                uint64_t status, const char *status_info,
                                const char *system_id,
                                const char *plugin_data)
{
    lsm_pool *rc = (lsm_pool *) calloc(1, sizeof(lsm_pool));
    if (rc) {
        rc->magic = LSM_POOL_MAGIC;
        rc->id = strdup(id);
        rc->name = strdup(name);
        rc->element_type = element_type;
        rc->unsupported_actions = unsupported_actions;
        rc->total_space = totalSpace;
        rc->free_space = freeSpace;
        rc->status = status;
        rc->status_info = strdup(status_info);
        rc->system_id = strdup(system_id);

        if (plugin_data) {
            rc->plugin_data = strdup(plugin_data);
        }

        if (!rc->id || !rc->name || !rc->system_id || !rc->status_info ||
            (plugin_data && !rc->plugin_data)) {
            lsm_pool_record_free(rc);
            rc = NULL;
        }
    }
    return rc;
}

/* Updates the cached free space of a valid pool record. */
void lsm_pool_free_space_set(lsm_pool * p, uint64_t free_space)
{
    if (LSM_IS_POOL(p)) {
        p->free_space = free_space;
    }
}

/* Deep copy of a pool record; NULL on invalid source or allocation failure. */
lsm_pool *lsm_pool_record_copy(lsm_pool * toBeCopied)
{
    if (LSM_IS_POOL(toBeCopied)) {
        return lsm_pool_record_alloc(toBeCopied->id,
                                     toBeCopied->name,
                                     toBeCopied->element_type,
                                     toBeCopied->unsupported_actions,
                                     toBeCopied->total_space,
                                     toBeCopied->free_space,
                                     toBeCopied->status,
                                     toBeCopied->status_info,
                                     toBeCopied->system_id,
                                     toBeCopied->plugin_data);
    }
    return NULL;
}

/* Frees a pool record and all owned strings; invalidates magic first. */
int lsm_pool_record_free(lsm_pool * p)
{
    if (LSM_IS_POOL(p)) {
        p->magic = LSM_DEL_MAGIC(LSM_POOL_MAGIC);
        if (p->name) {
            free(p->name);
            p->name =
NULL; } if (p->status_info) { free(p->status_info); p->status_info = NULL; } if (p->id) { free(p->id); p->id = NULL; } if (p->system_id) { free(p->system_id); p->system_id = NULL; } free(p->plugin_data); p->plugin_data = NULL; free(p); return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } CREATE_FREE_ARRAY_FUNC(lsm_pool_record_array_free, lsm_pool_record_free, lsm_pool *, LSM_ERR_INVALID_ARGUMENT) char *lsm_pool_name_get(lsm_pool * p) { if (LSM_IS_POOL(p)) { return p->name; } return NULL; } char *lsm_pool_id_get(lsm_pool * p) { if (LSM_IS_POOL(p)) { return p->id; } return NULL; } uint64_t lsm_pool_total_space_get(lsm_pool * p) { if (LSM_IS_POOL(p)) { return p->total_space; } return 0; } uint64_t lsm_pool_free_space_get(lsm_pool * p) { if (LSM_IS_POOL(p)) { return p->free_space; } return 0; } uint64_t lsm_pool_status_get(lsm_pool * p) { if (LSM_IS_POOL(p)) { return p->status; } return UINT64_MAX; } const char *lsm_pool_status_info_get(lsm_pool * p) { if (LSM_IS_POOL(p)) { return p->status_info; } return NULL; } char *lsm_pool_system_id_get(lsm_pool * p) { if (LSM_IS_POOL(p)) { return p->system_id; } return NULL; } MEMBER_FUNC_GET(const char *, lsm_pool_plugin_data_get, lsm_pool * p, p, LSM_IS_POOL, plugin_data, NULL) MEMBER_FUNC_GET(uint64_t, lsm_pool_element_type_get, lsm_pool * p, p, LSM_IS_POOL, element_type, 0) MEMBER_FUNC_GET(uint64_t, lsm_pool_unsupported_actions_get, lsm_pool * p, p, LSM_IS_POOL, element_type, 0) CREATE_ALLOC_ARRAY_FUNC(lsm_volume_record_array_alloc, lsm_volume *) lsm_volume *lsm_volume_record_alloc(const char *id, const char *name, const char *vpd83, uint64_t blockSize, uint64_t numberOfBlocks, uint32_t status, const char *system_id, const char *pool_id, const char *plugin_data) { if (vpd83 && (LSM_ERR_OK != lsm_volume_vpd83_verify(vpd83))) { return NULL; } lsm_volume *rc = (lsm_volume *) calloc(1, sizeof(lsm_volume)); if (rc) { rc->magic = LSM_VOL_MAGIC; rc->id = strdup(id); rc->name = strdup(name); if (vpd83) { rc->vpd83 = strdup(vpd83); 
}
        rc->block_size = blockSize;
        rc->number_of_blocks = numberOfBlocks;
        rc->admin_state = status;
        rc->system_id = strdup(system_id);
        rc->pool_id = strdup(pool_id);

        if (plugin_data) {
            rc->plugin_data = strdup(plugin_data);
        }

        if (!rc->id || !rc->name || (vpd83 && !rc->vpd83) ||
            !rc->system_id || !rc->pool_id ||
            (plugin_data && !rc->plugin_data)) {
            lsm_volume_record_free(rc);
            rc = NULL;
        }
    }
    return rc;
}

CREATE_ALLOC_ARRAY_FUNC(lsm_disk_record_array_alloc, lsm_disk *)

/* Allocates and populates a disk record (malloc is safe here: every member
 * is assigned below). */
lsm_disk *lsm_disk_record_alloc(const char *id, const char *name,
                                lsm_disk_type disk_type,
                                uint64_t block_size, uint64_t block_count,
                                uint64_t disk_status, const char *system_id)
{
    lsm_disk *rc = (lsm_disk *) malloc(sizeof(lsm_disk));
    if (rc) {
        rc->magic = LSM_DISK_MAGIC;
        rc->id = strdup(id);
        rc->name = strdup(name);
        rc->disk_type = disk_type;
        rc->block_size = block_size;
        rc->block_count = block_count;
        rc->disk_status = disk_status;
        rc->system_id = strdup(system_id);

        if (!rc->id || !rc->name || !rc->system_id) {
            lsm_disk_record_free(rc);
            rc = NULL;
        }
    }
    return rc;
}

CREATE_ALLOC_ARRAY_FUNC(lsm_system_record_array_alloc, lsm_system *)

/* Allocates and populates a system record; fails on any strdup error. */
lsm_system *lsm_system_record_alloc(const char *id, const char *name,
                                    uint32_t status,
                                    const char *status_info,
                                    const char *plugin_data)
{
    lsm_system *rc = (lsm_system *) calloc(1, sizeof(lsm_system));
    if (rc) {
        rc->magic = LSM_SYSTEM_MAGIC;
        rc->id = strdup(id);
        rc->name = strdup(name);
        rc->status = status;
        rc->status_info = strdup(status_info);

        if (plugin_data) {
            rc->plugin_data = strdup(plugin_data);
        }

        if (!rc->name || !rc->id || !rc->status_info ||
            (plugin_data && !rc->plugin_data)) {
            lsm_system_record_free(rc);
            rc = NULL;
        }
    }
    return rc;
}

/* Frees a system record and all owned strings; invalidates magic first. */
int lsm_system_record_free(lsm_system * s)
{
    if (LSM_IS_SYSTEM(s)) {
        s->magic = LSM_DEL_MAGIC(LSM_SYSTEM_MAGIC);

        if (s->id) {
            free(s->id);
            s->id = NULL;
        }

        if (s->name) {
            free(s->name);
            s->name = NULL;
        }

        if (s->status_info) {
            free(s->status_info);
            s->status_info = NULL;
        }

        free(s->plugin_data);

        free(s);
        return LSM_ERR_OK;
    }
    return LSM_ERR_INVALID_ARGUMENT;
}

CREATE_FREE_ARRAY_FUNC(lsm_system_record_array_free, lsm_system_record_free,
                       lsm_system *, LSM_ERR_INVALID_ARGUMENT)

/* Deep copy of a system record; NULL on invalid source or alloc failure. */
lsm_system *lsm_system_record_copy(lsm_system * s)
{
    lsm_system *rc = NULL;
    if (LSM_IS_SYSTEM(s)) {
        rc = lsm_system_record_alloc(s->id, s->name, s->status,
                                     s->status_info, s->plugin_data);
    }
    return rc;
}

const char *lsm_system_id_get(lsm_system * s)
{
    if (LSM_IS_SYSTEM(s)) {
        return s->id;
    }
    return NULL;
}

const char *lsm_system_name_get(lsm_system * s)
{
    if (LSM_IS_SYSTEM(s)) {
        return s->name;
    }
    return NULL;
}

/* System status; UINT32_MAX signals an invalid record. */
uint32_t lsm_system_status_get(lsm_system * s)
{
    if (LSM_IS_SYSTEM(s)) {
        return s->status;
    }
    return UINT32_MAX;
}

MEMBER_FUNC_GET(const char *, lsm_system_plugin_data_get, lsm_system * s, s,
                LSM_IS_SYSTEM, plugin_data, NULL)

/* Deep copy of a volume record; NULL on invalid source or alloc failure. */
lsm_volume *lsm_volume_record_copy(lsm_volume * vol)
{
    lsm_volume *rc = NULL;
    if (LSM_IS_VOL(vol)) {
        rc = lsm_volume_record_alloc(vol->id, vol->name, vol->vpd83,
                                     vol->block_size,
                                     vol->number_of_blocks,
                                     vol->admin_state, vol->system_id,
                                     vol->pool_id, vol->plugin_data);
    }
    return rc;
}

/* Frees a volume record and all owned strings; invalidates magic first. */
int lsm_volume_record_free(lsm_volume * v)
{
    if (LSM_IS_VOL(v)) {
        v->magic = LSM_DEL_MAGIC(LSM_VOL_MAGIC);

        if (v->id) {
            free(v->id);
            v->id = NULL;
        }

        if (v->name) {
            free(v->name);
            v->name = NULL;
        }

        if (v->vpd83) {
            free(v->vpd83);
            v->vpd83 = NULL;
        }

        if (v->system_id) {
            free(v->system_id);
            v->system_id = NULL;
        }

        if (v->pool_id) {
            free(v->pool_id);
            v->pool_id = NULL;
        }

        free(v->plugin_data);
        v->plugin_data = NULL;

        free(v);
        return LSM_ERR_OK;
    }
    return LSM_ERR_INVALID_ARGUMENT;
}

CREATE_FREE_ARRAY_FUNC(lsm_volume_record_array_free, lsm_volume_record_free,
                       lsm_volume *, LSM_ERR_INVALID_ARGUMENT)

/* Deep copy of a disk record; NULL on invalid source or alloc failure. */
lsm_disk *lsm_disk_record_copy(lsm_disk * disk)
{
    if (LSM_IS_DISK(disk)) {
        return lsm_disk_record_alloc(disk->id, disk->name,
                                     disk->disk_type, disk->block_size,
                                     disk->block_count,
                                     disk->disk_status, disk->system_id);
    }
    return NULL;
}

/* Frees a disk record and all owned strings; invalidates magic first. */
int lsm_disk_record_free(lsm_disk * d)
{
    if (LSM_IS_DISK(d)) {
        d->magic =
LSM_DEL_MAGIC(LSM_DISK_MAGIC); free(d->id); d->id = NULL; free(d->name); d->name = NULL; free(d->system_id); d->system_id = NULL; free(d); return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } CREATE_FREE_ARRAY_FUNC(lsm_disk_record_array_free, lsm_disk_record_free, lsm_disk *, LSM_ERR_INVALID_ARGUMENT) /* We would certainly expand this to encompass the entire function */ #define MEMBER_SET_REF(x, validation, member, value, alloc_func, \ free_func, error) \ if( validation(x) ) { \ if(x->member) { \ free_func(x->member); \ x->member = NULL; \ } \ if( value ) { \ x->member = alloc_func(value); \ if( !x->member ) { \ return LSM_ERR_NO_MEMORY; \ } \ } \ return LSM_ERR_OK; \ } else { \ return error; \ } /* We would certainly expand this to encompass the entire function */ #define MEMBER_SET_VAL(x, validation, member, value, error) \ if( validation(x) ) { \ x->member = value; \ return LSM_ERR_OK; \ } else { \ return error; \ } const char *lsm_volume_id_get(lsm_volume * v) { MEMBER_GET(v, LSM_IS_VOL, id, NULL); } const char *lsm_volume_name_get(lsm_volume * v) { MEMBER_GET(v, LSM_IS_VOL, name, NULL); } const char *lsm_volume_vpd83_get(lsm_volume * v) { MEMBER_GET(v, LSM_IS_VOL, vpd83, NULL); } uint64_t lsm_volume_block_size_get(lsm_volume * v) { MEMBER_GET(v, LSM_IS_VOL, block_size, 0); } uint64_t lsm_volume_number_of_blocks_get(lsm_volume * v) { MEMBER_GET(v, LSM_IS_VOL, number_of_blocks, 0); } uint32_t lsm_volume_admin_state_get(lsm_volume * v) { MEMBER_GET(v, LSM_IS_VOL, admin_state, 0); } char *lsm_volume_system_id_get(lsm_volume * v) { MEMBER_GET(v, LSM_IS_VOL, system_id, NULL); } char *lsm_volume_pool_id_get(lsm_volume * v) { MEMBER_GET(v, LSM_IS_VOL, pool_id, NULL); } MEMBER_FUNC_GET(const char *, lsm_volume_plugin_data_get, lsm_volume * v, v, LSM_IS_VOL, plugin_data, NULL) const char *lsm_disk_id_get(lsm_disk * d) { MEMBER_GET(d, LSM_IS_DISK, id, NULL); } const char *lsm_disk_name_get(lsm_disk * d) { MEMBER_GET(d, LSM_IS_DISK, name, NULL); } lsm_disk_type 
lsm_disk_type_get(lsm_disk * d) { MEMBER_GET(d, LSM_IS_DISK, disk_type, LSM_DISK_TYPE_OTHER); } uint64_t lsm_disk_block_size_get(lsm_disk * d) { MEMBER_GET(d, LSM_IS_DISK, block_size, 0); } uint64_t lsm_disk_number_of_blocks_get(lsm_disk * d) { MEMBER_GET(d, LSM_IS_DISK, block_count, 0); } uint64_t lsm_disk_status_get(lsm_disk * d) { MEMBER_GET(d, LSM_IS_DISK, disk_status, LSM_DISK_STATUS_UNKNOWN); } const char *lsm_disk_system_id_get(lsm_disk * d) { MEMBER_GET(d, LSM_IS_DISK, system_id, NULL); } CREATE_ALLOC_ARRAY_FUNC(lsm_access_group_record_array_alloc, lsm_access_group *) static lsm_string_list *standardize_init_list(lsm_string_list * initiators) { uint32_t i = 0; lsm_string_list *rc = lsm_string_list_copy(initiators); char *wwpn = NULL; if (rc) { for (i = 0; i < lsm_string_list_size(rc); ++i) { if (LSM_ERR_OK == wwpn_validate(lsm_string_list_elem_get(rc, i))) { /* We have a wwpn, switch to internal representation */ wwpn = wwpn_convert(lsm_string_list_elem_get(rc, i)); if (!wwpn || LSM_ERR_OK != lsm_string_list_elem_set(rc, i, wwpn)) { free(wwpn); lsm_string_list_free(rc); rc = NULL; break; } free(wwpn); wwpn = NULL; } } } return rc; } lsm_access_group *lsm_access_group_record_alloc(const char *id, const char *name, lsm_string_list * initiators, lsm_access_group_init_type init_type, const char *system_id, const char *plugin_data) { lsm_access_group *rc = NULL; if (id && name && system_id) { rc = (lsm_access_group *) malloc(sizeof(lsm_access_group)); if (rc) { rc->magic = LSM_ACCESS_GROUP_MAGIC; rc->id = strdup(id); rc->name = strdup(name); rc->system_id = strdup(system_id); rc->initiators = standardize_init_list(initiators); rc->init_type = init_type; if (plugin_data) { rc->plugin_data = strdup(plugin_data); } else { rc->plugin_data = NULL; } if (!rc->id || !rc->name || !rc->system_id || (plugin_data && !rc->plugin_data) || (initiators && !rc->initiators)) { lsm_access_group_record_free(rc); rc = NULL; } } } return rc; } lsm_access_group 
*lsm_access_group_record_copy(lsm_access_group * ag) { lsm_access_group *rc = NULL; if (LSM_IS_ACCESS_GROUP(ag)) { rc = lsm_access_group_record_alloc(ag->id, ag->name, ag->initiators, ag->init_type, ag->system_id, ag->plugin_data); } return rc; } int lsm_access_group_record_free(lsm_access_group * ag) { if (LSM_IS_ACCESS_GROUP(ag)) { ag->magic = LSM_DEL_MAGIC(LSM_ACCESS_GROUP_MAGIC); free(ag->id); free(ag->name); free(ag->system_id); lsm_string_list_free(ag->initiators); free(ag->plugin_data); free(ag); return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } CREATE_FREE_ARRAY_FUNC(lsm_access_group_record_array_free, lsm_access_group_record_free, lsm_access_group *, LSM_ERR_INVALID_ARGUMENT) const char *lsm_access_group_id_get(lsm_access_group * group) { if (LSM_IS_ACCESS_GROUP(group)) { return group->id; } return NULL; } const char *lsm_access_group_name_get(lsm_access_group * group) { if (LSM_IS_ACCESS_GROUP(group)) { return group->name; } return NULL; } const char *lsm_access_group_system_id_get(lsm_access_group * group) { if (LSM_IS_ACCESS_GROUP(group)) { return group->system_id; } return NULL; } lsm_string_list *lsm_access_group_initiator_id_get(lsm_access_group * group) { if (LSM_IS_ACCESS_GROUP(group)) { return group->initiators; } return NULL; } void lsm_access_group_initiator_id_set(lsm_access_group * group, lsm_string_list * il) { if (LSM_IS_ACCESS_GROUP(group)) { if (group->initiators && group->initiators != il) { lsm_string_list_free(group->initiators); } group->initiators = lsm_string_list_copy(il); } } lsm_error_ptr lsm_error_last_get(lsm_connect * c) { if (LSM_IS_CONNECT(c)) { lsm_error_ptr e = c->error; c->error = NULL; return e; } return NULL; } lsm_block_range *lsm_block_range_record_alloc(uint64_t source_start, uint64_t dest_start, uint64_t block_count) { lsm_block_range *rc = NULL; rc = (lsm_block_range *) malloc(sizeof(lsm_block_range)); if (rc) { rc->magic = LSM_BLOCK_RANGE_MAGIC; rc->source_start = source_start; rc->dest_start = dest_start; 
rc->block_count = block_count; } return rc; } int lsm_block_range_record_free(lsm_block_range * br) { if (LSM_IS_BLOCK_RANGE(br)) { br->magic = LSM_DEL_MAGIC(LSM_BLOCK_RANGE_MAGIC); free(br); return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } lsm_block_range *lsm_block_range_record_copy(lsm_block_range * source) { lsm_block_range *dest = NULL; if (LSM_IS_BLOCK_RANGE(source)) { dest = lsm_block_range_record_alloc(source->source_start, source->dest_start, source->block_count); } return dest; } CREATE_ALLOC_ARRAY_FUNC(lsm_block_range_record_array_alloc, lsm_block_range *) CREATE_FREE_ARRAY_FUNC(lsm_block_range_record_array_free, lsm_block_range_record_free, lsm_block_range *, LSM_ERR_INVALID_ARGUMENT) uint64_t lsm_block_range_source_start_get(lsm_block_range * br) { MEMBER_GET(br, LSM_IS_BLOCK_RANGE, source_start, 0); } uint64_t lsm_block_range_dest_start_get(lsm_block_range * br) { MEMBER_GET(br, LSM_IS_BLOCK_RANGE, dest_start, 0); } uint64_t lsm_block_range_block_count_get(lsm_block_range * br) { MEMBER_GET(br, LSM_IS_BLOCK_RANGE, block_count, 0); } lsm_fs *lsm_fs_record_alloc(const char *id, const char *name, uint64_t total_space, uint64_t free_space, const char *pool_id, const char *system_id, const char *plugin_data) { lsm_fs *rc = NULL; rc = (lsm_fs *) calloc(1, sizeof(lsm_fs)); if (rc) { rc->magic = LSM_FS_MAGIC; rc->id = strdup(id); rc->name = strdup(name); rc->pool_id = strdup(pool_id); rc->system_id = strdup(system_id); rc->total_space = total_space; rc->free_space = free_space; if (plugin_data) { rc->plugin_data = strdup(plugin_data); } if (!rc->id || !rc->name || !rc->pool_id || !rc->system_id || (plugin_data && !rc->plugin_data)) { lsm_fs_record_free(rc); rc = NULL; } } return rc; } int lsm_fs_record_free(lsm_fs * fs) { if (LSM_IS_FS(fs)) { fs->magic = LSM_DEL_MAGIC(LSM_FS_MAGIC); free(fs->id); free(fs->name); free(fs->pool_id); free(fs->system_id); free(fs->plugin_data); free(fs); return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } lsm_fs 
*lsm_fs_record_copy(lsm_fs * source) { lsm_fs *dest = NULL; if (LSM_IS_FS(source)) { dest = lsm_fs_record_alloc(source->id, source->name, source->total_space, source->free_space, source->pool_id, source->system_id, source->plugin_data); } return dest; } CREATE_ALLOC_ARRAY_FUNC(lsm_fs_record_array_alloc, lsm_fs *) CREATE_FREE_ARRAY_FUNC(lsm_fs_record_array_free, lsm_fs_record_free, lsm_fs *, LSM_ERR_INVALID_ARGUMENT) const char *lsm_fs_id_get(lsm_fs * fs) { MEMBER_GET(fs, LSM_IS_FS, id, NULL); } const char *lsm_fs_name_get(lsm_fs * fs) { MEMBER_GET(fs, LSM_IS_FS, name, NULL); } const char *lsm_fs_system_id_get(lsm_fs * fs) { MEMBER_GET(fs, LSM_IS_FS, system_id, NULL); } const char *lsm_fs_pool_id_get(lsm_fs * fs) { MEMBER_GET(fs, LSM_IS_FS, pool_id, NULL); } uint64_t lsm_fs_total_space_get(lsm_fs * fs) { MEMBER_GET(fs, LSM_IS_FS, total_space, 0); } uint64_t lsm_fs_free_space_get(lsm_fs * fs) { MEMBER_GET(fs, LSM_IS_FS, free_space, 0); } MEMBER_FUNC_GET(const char *, lsm_fs_plugin_data_get, lsm_fs * fs, fs, LSM_IS_POOL, plugin_data, NULL) lsm_fs_ss *lsm_fs_ss_record_alloc(const char *id, const char *name, uint64_t ts, const char *plugin_data) { lsm_fs_ss *rc = (lsm_fs_ss *) calloc(1, sizeof(lsm_fs_ss)); if (rc) { rc->magic = LSM_SS_MAGIC; rc->id = strdup(id); rc->name = strdup(name); rc->ts = ts; if (plugin_data) { rc->plugin_data = strdup(plugin_data); } if (!rc->id || !rc->name || (plugin_data && !rc->plugin_data)) { lsm_fs_ss_record_free(rc); rc = NULL; } } return rc; } lsm_fs_ss *lsm_fs_ss_record_copy(lsm_fs_ss * source) { lsm_fs_ss *rc = NULL; if (LSM_IS_SS(source)) { rc = lsm_fs_ss_record_alloc(source->id, source->name, source->ts, source->plugin_data); } return rc; } int lsm_fs_ss_record_free(lsm_fs_ss * ss) { if (LSM_IS_SS(ss)) { ss->magic = LSM_DEL_MAGIC(LSM_SS_MAGIC); free(ss->id); free(ss->name); free(ss->plugin_data); free(ss); return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } CREATE_ALLOC_ARRAY_FUNC(lsm_fs_ss_record_array_alloc, lsm_fs_ss *) 
CREATE_FREE_ARRAY_FUNC(lsm_fs_ss_record_array_free, lsm_fs_ss_record_free, lsm_fs_ss *, LSM_ERR_INVALID_ARGUMENT) const char *lsm_fs_ss_id_get(lsm_fs_ss * ss) { MEMBER_GET(ss, LSM_IS_SS, id, NULL); } const char *lsm_fs_ss_name_get(lsm_fs_ss * ss) { MEMBER_GET(ss, LSM_IS_SS, name, NULL); } uint64_t lsm_fs_ss_time_stamp_get(lsm_fs_ss * ss) { MEMBER_GET(ss, LSM_IS_SS, ts, 0); } MEMBER_FUNC_GET(const char *, lsm_fs_ss_plugin_data_get, lsm_fs_ss * ss, ss, LSM_IS_SS, plugin_data, NULL) lsm_nfs_export *lsm_nfs_export_record_alloc(const char *id, const char *fs_id, const char *export_path, const char *auth, lsm_string_list * root, lsm_string_list * rw, lsm_string_list * ro, uint64_t anonuid, uint64_t anongid, const char *options, const char *plugin_data) { lsm_nfs_export *rc = NULL; /* This is required */ if (fs_id) { rc = (lsm_nfs_export *) calloc(1, sizeof(lsm_nfs_export)); if (rc) { rc->magic = LSM_NFS_EXPORT_MAGIC; rc->id = (id) ? strdup(id) : NULL; rc->fs_id = strdup(fs_id); rc->export_path = (export_path) ? strdup(export_path) : NULL; rc->auth_type = (auth) ? strdup(auth) : NULL; rc->root = lsm_string_list_copy(root); rc->rw = lsm_string_list_copy(rw); rc->ro = lsm_string_list_copy(ro); rc->anonuid = anonuid; rc->anongid = anongid; rc->options = (options) ? 
strdup(options) : NULL; if (plugin_data) { rc->plugin_data = strdup(plugin_data); } if (!rc->id || !rc->fs_id || (export_path && !rc->export_path) || (auth && !rc->auth_type) || (root && !rc->root) || (rw && !rc->rw) || (ro && !rc->ro) || (options && !rc->options) || (plugin_data && !rc->plugin_data)) { lsm_nfs_export_record_free(rc); rc = NULL; } } } return rc; } int lsm_nfs_export_record_free(lsm_nfs_export * exp) { if (LSM_IS_NFS_EXPORT(exp)) { exp->magic = LSM_DEL_MAGIC(LSM_NFS_EXPORT_MAGIC); free(exp->id); free(exp->fs_id); free(exp->export_path); free(exp->auth_type); lsm_string_list_free(exp->root); lsm_string_list_free(exp->rw); lsm_string_list_free(exp->ro); free(exp->options); free(exp->plugin_data); free(exp); return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } lsm_nfs_export *lsm_nfs_export_record_copy(lsm_nfs_export * s) { if (LSM_IS_NFS_EXPORT(s)) { return lsm_nfs_export_record_alloc(s->id, s->fs_id, s->export_path, s->auth_type, s->root, s->rw, s->ro, s->anonuid, s->anongid, s->options, s->plugin_data); } return NULL; } CREATE_ALLOC_ARRAY_FUNC(lsm_nfs_export_record_array_alloc, lsm_nfs_export *) CREATE_FREE_ARRAY_FUNC(lsm_nfs_export_record_array_free, lsm_nfs_export_record_free, lsm_nfs_export *, LSM_ERR_INVALID_ARGUMENT) const char *lsm_nfs_export_id_get(lsm_nfs_export * exp) { MEMBER_GET(exp, LSM_IS_NFS_EXPORT, id, NULL); } int lsm_nfs_export_id_set(lsm_nfs_export * exp, const char *ep) { MEMBER_SET_REF(exp, LSM_IS_NFS_EXPORT, id, ep, strdup, free, LSM_ERR_INVALID_ARGUMENT); } const char *lsm_nfs_export_fs_id_get(lsm_nfs_export * exp) { MEMBER_GET(exp, LSM_IS_NFS_EXPORT, fs_id, NULL); } int lsm_nfs_export_fs_id_set(lsm_nfs_export * exp, const char *fs_id) { MEMBER_SET_REF(exp, LSM_IS_NFS_EXPORT, fs_id, fs_id, strdup, free, LSM_ERR_INVALID_ARGUMENT); } const char *lsm_nfs_export_export_path_get(lsm_nfs_export * exp) { MEMBER_GET(exp, LSM_IS_NFS_EXPORT, export_path, NULL); } int lsm_nfs_export_export_path_set(lsm_nfs_export * exp, const char *ep) 
{ MEMBER_SET_REF(exp, LSM_IS_NFS_EXPORT, export_path, ep, strdup, free, LSM_ERR_INVALID_ARGUMENT); } const char *lsm_nfs_export_auth_type_get(lsm_nfs_export * exp) { MEMBER_GET(exp, LSM_IS_NFS_EXPORT, auth_type, NULL); } int lsm_nfs_export_auth_type_set(lsm_nfs_export * exp, const char *auth) { MEMBER_SET_REF(exp, LSM_IS_NFS_EXPORT, auth_type, auth, strdup, free, LSM_ERR_INVALID_ARGUMENT); } lsm_string_list *lsm_nfs_export_root_get(lsm_nfs_export * exp) { MEMBER_GET(exp, LSM_IS_NFS_EXPORT, root, NULL); } int lsm_nfs_export_root_set(lsm_nfs_export * exp, lsm_string_list * root) { MEMBER_SET_REF(exp, LSM_IS_NFS_EXPORT, root, root, lsm_string_list_copy, lsm_string_list_free, LSM_ERR_INVALID_ARGUMENT); } lsm_string_list *lsm_nfs_export_read_write_get(lsm_nfs_export * exp) { MEMBER_GET(exp, LSM_IS_NFS_EXPORT, rw, NULL); } int lsm_nfs_export_read_write_set(lsm_nfs_export * exp, lsm_string_list * rw) { MEMBER_SET_REF(exp, LSM_IS_NFS_EXPORT, rw, rw, lsm_string_list_copy, lsm_string_list_free, LSM_ERR_INVALID_ARGUMENT); } lsm_string_list *lsm_nfs_export_read_only_get(lsm_nfs_export * exp) { MEMBER_GET(exp, LSM_IS_NFS_EXPORT, ro, NULL); } int lsm_nfs_export_read_only_set(lsm_nfs_export * exp, lsm_string_list * ro) { MEMBER_SET_REF(exp, LSM_IS_NFS_EXPORT, ro, ro, lsm_string_list_copy, lsm_string_list_free, LSM_ERR_INVALID_ARGUMENT); } uint64_t lsm_nfs_export_anon_uid_get(lsm_nfs_export * exp) { MEMBER_GET(exp, LSM_IS_NFS_EXPORT, anonuid, ANON_UID_GID_ERROR); } int lsm_nfs_export_anon_uid_set(lsm_nfs_export * exp, uint64_t value) { MEMBER_SET_VAL(exp, LSM_IS_NFS_EXPORT, anonuid, value, LSM_ERR_INVALID_ARGUMENT); } uint64_t lsm_nfs_export_anon_gid_get(lsm_nfs_export * exp) { MEMBER_GET(exp, LSM_IS_NFS_EXPORT, anongid, ANON_UID_GID_ERROR); } int lsm_nfs_export_anon_gid_set(lsm_nfs_export * exp, uint64_t value) { MEMBER_SET_VAL(exp, LSM_IS_NFS_EXPORT, anongid, value, LSM_ERR_INVALID_ARGUMENT); } const char *lsm_nfs_export_options_get(lsm_nfs_export * exp) { MEMBER_GET(exp, 
LSM_IS_NFS_EXPORT, options, NULL); } int lsm_nfs_export_options_set(lsm_nfs_export * exp, const char *value) { MEMBER_SET_REF(exp, LSM_IS_NFS_EXPORT, options, value, strdup, free, LSM_ERR_INVALID_ARGUMENT); } MEMBER_FUNC_GET(const char *, lsm_nfs_export_plugin_data_get, lsm_nfs_export * exp, exp, LSM_IS_NFS_EXPORT, plugin_data, NULL) lsm_capability_value_type lsm_capability_get(lsm_storage_capabilities * cap, lsm_capability_type t) { lsm_capability_value_type rc = LSM_CAP_UNSUPPORTED; if (LSM_IS_CAPABILITIY(cap) && (uint32_t) t < cap->len) { rc = (lsm_capability_value_type) cap->cap[t]; } return rc; } int LSM_DLL_EXPORT lsm_capability_supported(lsm_storage_capabilities * cap, lsm_capability_type t) { if (lsm_capability_get(cap, t) == LSM_CAP_SUPPORTED) { return 1; } return 0; } int lsm_capability_set(lsm_storage_capabilities * cap, lsm_capability_type t, lsm_capability_value_type v) { int rc = LSM_ERR_INVALID_ARGUMENT; if (LSM_IS_CAPABILITIY(cap)) { if ((uint32_t) t < cap->len) { cap->cap[t] = v; rc = LSM_ERR_OK; } } return rc; } int lsm_capability_set_n(lsm_storage_capabilities * cap, lsm_capability_value_type v, ...) 
{ int rc = LSM_ERR_OK; int index = 0; if (!LSM_IS_CAPABILITIY(cap)) { return LSM_ERR_INVALID_ARGUMENT; } va_list var_arg; va_start(var_arg, v); while ((index = va_arg(var_arg, int)) != -1) { if (index < (int) cap->len) { cap->cap[index] = v; } else { rc = LSM_ERR_INVALID_ARGUMENT; break; } } va_end(var_arg); return rc; } static char *bytes_to_string(uint8_t * a, uint32_t len) { char *buff = NULL; if (a && len) { uint32_t i = 0; char *tmp = NULL; size_t str_len = ((sizeof(char) * 2) * len + 1); buff = (char *) malloc(str_len); if (buff) { tmp = buff; for (i = 0; i < len; ++i) { tmp += sprintf(tmp, "%02x", a[i]); } buff[str_len - 1] = '\0'; } } return buff; } static uint8_t *string_to_bytes(const char *hex_string, uint32_t * l) { uint8_t *rc = NULL; if (hex_string && l) { size_t len = strlen(hex_string); if (len && (len % 2) == 0) { len /= 2; rc = (uint8_t *) malloc(sizeof(uint8_t) * len); if (rc) { size_t i; const char *t = hex_string; *l = len; for (i = 0; i < len; ++i) { if (1 != sscanf(t, "%02hhx", &rc[i])) { free(rc); rc = NULL; *l = 0; break; } t += 2; } } } } return rc; } lsm_storage_capabilities *lsm_capability_record_alloc(const char *value) { lsm_storage_capabilities *rc = NULL; rc = (lsm_storage_capabilities *) malloc(sizeof(struct _lsm_storage_capabilities)); if (rc) { rc->magic = LSM_CAPABILITIES_MAGIC; if (value) { rc->cap = string_to_bytes(value, &rc->len); } else { rc->cap = (uint8_t *) calloc(LSM_CAP_MAX, sizeof(uint8_t)); if (rc->cap) { rc->len = LSM_CAP_MAX; } } if (!rc->cap) { lsm_capability_record_free(rc); rc = NULL; } } return rc; } int lsm_capability_record_free(lsm_storage_capabilities * cap) { if (LSM_IS_CAPABILITIY(cap)) { cap->magic = LSM_DEL_MAGIC(LSM_CAPABILITIES_MAGIC); free(cap->cap); free(cap); return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } char *capability_string(lsm_storage_capabilities * c) { char *rc = NULL; if (LSM_IS_CAPABILITIY(c)) { rc = bytes_to_string(c->cap, c->len); } return rc; } lsm_hash *lsm_hash_alloc(void) { 
lsm_hash *rc = NULL; rc = (lsm_hash *) malloc(sizeof(lsm_hash)); if (rc) { rc->magic = LSM_HASH_MAGIC; rc->data = g_hash_table_new_full(g_str_hash, g_str_equal, free, free); if (!rc->data) { lsm_hash_free(rc); rc = NULL; } } return rc; } lsm_hash *lsm_hash_copy(lsm_hash * src) { GHashTableIter iter; gpointer key; gpointer value; lsm_hash *dest = NULL; if (LSM_IS_HASH(src)) { dest = lsm_hash_alloc(); if (dest) { /* Walk through each from src and duplicate it to dest */ g_hash_table_iter_init(&iter, src->data); while (g_hash_table_iter_next(&iter, &key, &value)) { if (LSM_ERR_OK != lsm_hash_string_set(dest, (const char *) key, (const char *) value)) { lsm_hash_free(dest); dest = NULL; } } } } return dest; } int lsm_hash_free(lsm_hash * op) { if (LSM_IS_HASH(op)) { op->magic = LSM_DEL_MAGIC(LSM_HASH_MAGIC); if (op->data) { g_hash_table_destroy(op->data); } free(op); return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } int lsm_hash_keys(lsm_hash * op, lsm_string_list ** l) { GHashTableIter iter; gpointer key; gpointer value; if (LSM_IS_HASH(op)) { int count = g_hash_table_size(op->data); if (count) { *l = lsm_string_list_alloc(0); g_hash_table_iter_init(&iter, op->data); while (g_hash_table_iter_next(&iter, &key, &value)) { if (LSM_ERR_OK != lsm_string_list_append(*l, (char *) key)) { lsm_string_list_free(*l); *l = NULL; return LSM_ERR_NO_MEMORY; } } } return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } const char *lsm_hash_string_get(lsm_hash * op, const char *key) { if (LSM_IS_HASH(op)) { return (const char *) g_hash_table_lookup(op->data, key); } return NULL; } int lsm_hash_string_set(lsm_hash * op, const char *key, const char *value) { if (LSM_IS_HASH(op)) { char *k_value = strdup(key); char *d_value = strdup(value); if (k_value && d_value) { g_hash_table_remove(op->data, (gpointer) k_value); g_hash_table_insert(op->data, (gpointer) k_value, (gpointer) d_value); return LSM_ERR_OK; } else { free(k_value); free(d_value); return LSM_ERR_NO_MEMORY; } } return 
LSM_ERR_INVALID_ARGUMENT; } lsm_target_port *lsm_target_port_record_alloc(const char *id, lsm_target_port_type port_type, const char *service_address, const char *network_address, const char *physical_address, const char *physical_name, const char *system_id, const char *plugin_data) { lsm_target_port *rc = (lsm_target_port *) calloc(1, sizeof(lsm_target_port)); if (rc) { rc->magic = LSM_TARGET_PORT_MAGIC; rc->id = strdup(id); rc->port_type = port_type; rc->service_address = strdup(service_address); rc->network_address = strdup(network_address); rc->physical_address = strdup(physical_address); rc->physical_name = strdup(physical_name); rc->system_id = strdup(system_id); rc->plugin_data = (plugin_data) ? strdup(plugin_data) : NULL; if (!rc->id || !rc->service_address || !rc->network_address || !rc->physical_address || !rc->physical_name || !rc->system_id || (plugin_data && !rc->plugin_data)) { lsm_target_port_record_free(rc); rc = NULL; } } return rc; } int lsm_target_port_record_free(lsm_target_port * tp) { if (LSM_IS_TARGET_PORT(tp)) { tp->magic = LSM_DEL_MAGIC(LSM_TARGET_PORT_MAGIC); free(tp->id); tp->id = NULL; free(tp->plugin_data); tp->plugin_data = NULL; free(tp->system_id); tp->system_id = NULL; free(tp->physical_name); tp->physical_name = NULL; free(tp->physical_address); tp->physical_address = NULL; free(tp->network_address); tp->network_address = NULL; free(tp->service_address); tp->service_address = NULL; free(tp); return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } lsm_target_port LSM_DLL_EXPORT *lsm_target_port_copy(lsm_target_port * tp) { lsm_target_port *rc = NULL; if (LSM_IS_TARGET_PORT(tp)) { rc = lsm_target_port_record_alloc(tp->id, tp->port_type, tp->service_address, tp->network_address, tp->physical_address, tp->physical_name, tp->system_id, tp->plugin_data); } return rc; } MEMBER_FUNC_GET(const char *, lsm_target_port_id_get, lsm_target_port * tp, tp, LSM_IS_TARGET_PORT, id, NULL) MEMBER_FUNC_GET(lsm_target_port_type, 
lsm_target_port_type_get, lsm_target_port * tp, tp, LSM_IS_TARGET_PORT, port_type, LSM_TARGET_PORT_TYPE_OTHER) MEMBER_FUNC_GET(const char *, lsm_target_port_service_address_get, lsm_target_port * tp, tp, LSM_IS_TARGET_PORT, service_address, NULL) MEMBER_FUNC_GET(const char *, lsm_target_port_network_address_get, lsm_target_port * tp, tp, LSM_IS_TARGET_PORT, network_address, NULL) MEMBER_FUNC_GET(const char *, lsm_target_port_physical_address_get, lsm_target_port * tp, tp, LSM_IS_TARGET_PORT, physical_address, NULL) MEMBER_FUNC_GET(const char *, lsm_target_port_physical_name_get, lsm_target_port * tp, tp, LSM_IS_TARGET_PORT, physical_name, NULL) MEMBER_FUNC_GET(const char *, lsm_target_port_system_id_get, lsm_target_port * tp, tp, LSM_IS_TARGET_PORT, system_id, NULL) CREATE_ALLOC_ARRAY_FUNC(lsm_target_port_record_array_alloc, lsm_target_port *) CREATE_FREE_ARRAY_FUNC(lsm_target_port_record_array_free, lsm_target_port_record_free, lsm_target_port *, LSM_ERR_INVALID_ARGUMENT) static int reg_ex_match(const char *pattern, const char *str) { regex_t start_state; int status = 0; int rc = regcomp(&start_state, pattern, REG_EXTENDED); if (rc) { // Development only when changing regular expression //fprintf(stderr, "%s: bad pattern: '%s' %d\n", str, pattern, rc); return -1; } status = regexec(&start_state, str, 0, NULL, 0); regfree(&start_state); return status; } int iqn_validate(const char *iqn) { if ((iqn && strlen(iqn) > 4) && (0 == strncmp(iqn, "iqn", 3) || 0 == strncmp(iqn, "naa", 3) || 0 == strncmp(iqn, "eui", 3))) { return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } int wwpn_validate(const char *wwpn) { const char *pattern = "^(0x|0X)?([0-9A-Fa-f]{2})" "(([\\.\\:\\-])?[0-9A-Fa-f]{2}){7}$"; if (0 == reg_ex_match(pattern, wwpn)) { return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } char *wwpn_convert(const char *wwpn) { size_t i = 0; size_t out = 0; char *rc = NULL; if (LSM_ERR_OK == wwpn_validate(wwpn)) { rc = (char *) calloc(24, 1); size_t len = strlen(wwpn); 
if (wwpn[1] == 'x' || wwpn[1] == 'X') { i = 2; } for (; i < len; ++i) { if (wwpn[i] != ':' && wwpn[i] != '-' && wwpn[i] != '.') { rc[out++] = tolower(wwpn[i]); } else { rc[out++] = ':'; } } } return rc; } #ifdef __cplusplus } #endif libstoragemgmt-1.2.3/c_binding/lsm_mgmt.cpp0000664000175000017500000022005112537737032015730 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #include "libstoragemgmt/libstoragemgmt.h" #include "libstoragemgmt/libstoragemgmt_error.h" #include "libstoragemgmt/libstoragemgmt_plug_interface.h" #include "libstoragemgmt/libstoragemgmt_types.h" #include #include #include #include #include "lsm_datatypes.hpp" #include "lsm_convert.hpp" #define COUNT_OF(x) \ ((sizeof(x)/sizeof(0[x])) / ((size_t)(!(sizeof(x) % sizeof(0[x]))))) static const char *const POOL_SEARCH_KEYS[] = { "id", "system_id" }; #define POOL_SEARCH_KEYS_COUNT COUNT_OF(POOL_SEARCH_KEYS) static const char *const VOLUME_SEARCH_KEYS[] = { "id", "system_id", "pool_id" }; #define VOLUME_SEARCH_KEYS_COUNT COUNT_OF(VOLUME_SEARCH_KEYS) static const char *const DISK_SEARCH_KEYS[] = { "id", "system_id" }; #define DISK_SEARCH_KEYS_COUNT COUNT_OF(DISK_SEARCH_KEYS) static const char *const FS_SEARCH_KEYS[] = { "id", "system_id", "pool_id" }; #define FS_SEARCH_KEYS_COUNT COUNT_OF(FS_SEARCH_KEYS) static const char *const NFS_EXPORT_SEARCH_KEYS[] = { "id", "fs_id" }; 
#define NFS_EXPORT_SEARCH_KEYS_COUNT COUNT_OF(NFS_EXPORT_SEARCH_KEYS) static const char *const ACCESS_GROUP_SEARCH_KEYS[] = { "id", "system_id" }; #define ACCESS_GROUP_SEARCH_KEYS_COUNT COUNT_OF(ACCESS_GROUP_SEARCH_KEYS) static const char *const TARGET_PORT_SEARCH_KEYS[] = { "id", "system_id" }; #define TARGET_PORT_SEARCH_KEYS_COUNT COUNT_OF(TARGET_PORT_SEARCH_KEYS) /** * Common code to validate and initialize the connection. */ #define CONN_SETUP(c) do { \ if(!LSM_IS_CONNECT(c)) { \ return LSM_ERR_INVALID_ARGUMENT;\ } \ lsm_error_free(c->error); \ c->error = NULL; \ } while (0) static int check_search_key(const char *search_key, const char *const supported_keys[], size_t supported_keys_count) { size_t i = 0; for (i = 0; i < supported_keys_count; ++i) { if (0 == strcmp(search_key, supported_keys[i])) { return 1; } } return 0; } int lsm_initiator_id_verify(const char *init_id, lsm_access_group_init_type * init_type) { int rc = LSM_ERR_INVALID_ARGUMENT; if (init_id != NULL && strlen(init_id) > 3) { switch (*init_type) { case (LSM_ACCESS_GROUP_INIT_TYPE_UNKNOWN): if (0 == iqn_validate(init_id)) { *init_type = LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN; rc = LSM_ERR_OK; } if (0 == wwpn_validate(init_id)) { *init_type = LSM_ACCESS_GROUP_INIT_TYPE_WWPN; rc = LSM_ERR_OK; } break; case (LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN): if (0 == iqn_validate(init_id)) { *init_type = LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN; rc = LSM_ERR_OK; } break; case (LSM_ACCESS_GROUP_INIT_TYPE_WWPN): if (0 == wwpn_validate(init_id)) { *init_type = LSM_ACCESS_GROUP_INIT_TYPE_WWPN; rc = LSM_ERR_OK; } break; default: break; } } return rc; } int lsm_volume_vpd83_verify(const char *vpd83) { int rc = LSM_ERR_INVALID_ARGUMENT; size_t i; size_t vpd83_len; if (vpd83) { vpd83_len = strlen(vpd83); if ((vpd83_len == 32 && vpd83[0] == '6') || (vpd83_len == 16 && vpd83[0] == '2') || (vpd83_len == 16 && vpd83[0] == '3') || (vpd83_len == 16 && vpd83[0] == '5')) { for (i = 0; i < vpd83_len; ++i) { char v = vpd83[i]; // 0-9 
|| a-f is OK if (!((v >= 48 && v <= 57) || (v >= 97 && v <= 102))) { return rc; } } rc = LSM_ERR_OK; } } return rc; } static int verify_initiator_id(const char *id, lsm_access_group_init_type t, Value & initiator) { initiator = Value(id); if (t == LSM_ACCESS_GROUP_INIT_TYPE_WWPN) { char *wwpn = wwpn_convert(id); if (wwpn) { initiator = Value(wwpn); free(wwpn); wwpn = NULL; } else { return LSM_ERR_INVALID_ARGUMENT; } } else if (t == LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN) { if (iqn_validate(id)) { return LSM_ERR_INVALID_ARGUMENT; } } return LSM_ERR_OK; } /** * Strings are non null with a len >= 1 */ #define CHECK_STR(x) ( !(x) || !strlen(x) ) /** * When we pass in a pointer for an out value we want to make sure that * the pointer isn't null, and that the dereferenced value is != NULL to prevent * memory leaks. */ #define CHECK_RP(x) (!(x) || *(x) != NULL) int lsm_connect_password(const char *uri, const char *password, lsm_connect ** conn, uint32_t timeout, lsm_error_ptr * e, lsm_flag flags) { int rc = LSM_ERR_OK; lsm_connect *c = NULL; /* Password is optional */ if (CHECK_STR(uri) || CHECK_RP(conn) || !timeout || CHECK_RP(e) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } c = connection_get(); if (c) { c->uri = xmlParseURI(uri); if (c->uri && c->uri->scheme) { c->raw_uri = strdup(uri); if (c->raw_uri) { rc = driver_load(c, c->uri->scheme, password, timeout, e, 1, flags); if (rc == LSM_ERR_OK) { *conn = (lsm_connect *) c; } } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_INVALID_ARGUMENT; } /*If we fail for any reason free resources associated with connection */ if (rc != LSM_ERR_OK) { connection_free(c); } } else { rc = LSM_ERR_NO_MEMORY; } return rc; } static int lsm_error_log(lsm_connect * c, lsm_error_ptr error) { if (!LSM_IS_CONNECT(c) || !LSM_IS_ERROR(error)) { return LSM_ERR_INVALID_ARGUMENT; } if (c->error) { lsm_error_free(c->error); c->error = NULL; } c->error = error; return LSM_ERR_OK; } static lsm_error_number 
log_exception(lsm_connect * c, lsm_error_number error, const char *message, const char *exception_msg) { lsm_error_ptr err = lsm_error_create(error, message, exception_msg, NULL, NULL, 0); if (err) { lsm_error_log(c, err); } return error; } static int rpc(lsm_connect * c, const char *method, const Value & parameters, Value & response) throw() { try { response = c->tp->rpc(method, parameters); } catch(const ValueException & ve) { return log_exception(c, LSM_ERR_TRANSPORT_SERIALIZATION, "Serialization error", ve.what()); } catch(const LsmException & le) { return log_exception(c, (lsm_error_number) le.error_code, le.what(), NULL); } catch(const EOFException & eof) { return log_exception(c, LSM_ERR_TRANSPORT_COMMUNICATION, "Plug-in died", "Check syslog"); } catch( ...) { return log_exception(c, LSM_ERR_LIB_BUG, "Unexpected exception", "Unknown exception"); } return LSM_ERR_OK; } static int job_check(lsm_connect * c, int rc, Value & response, char **job) { try { if (LSM_ERR_OK == rc) { //We get a value back, either null or job id. 
if (Value::string_t == response.valueType()) { *job = strdup(response.asString().c_str()); if (*job) { rc = LSM_ERR_JOB_STARTED; } else { rc = LSM_ERR_NO_MEMORY; } } else { *job = NULL; } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Wrong type", ve.what()); } return rc; } static int get_access_groups(lsm_connect * c, int rc, Value & response, lsm_access_group ** groups[], uint32_t * count) { try { if (LSM_ERR_OK == rc && Value::array_t == response.valueType()) { rc = value_array_to_access_groups(response, groups, count); } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } static int add_search_params(std::map < std::string, Value > &p, const char *k, const char *v, const char *const supported_keys[], size_t supported_keys_count) { if (k) { if (v) { if (!check_search_key(k, supported_keys, supported_keys_count)) { return LSM_ERR_UNSUPPORTED_SEARCH_KEY; } } else { return LSM_ERR_INVALID_ARGUMENT; } } p["search_key"] = Value(k); p["search_value"] = Value(v); return LSM_ERR_OK; } int lsm_connect_close(lsm_connect * c, lsm_flag flags) { CONN_SETUP(c); if (LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["flags"] = Value(flags); Value parameters(p); Value response; //No response data needed on plugin_unregister int rc = rpc(c, "plugin_unregister", parameters, response); //Free the connection. 
connection_free(c); return rc; } static Value _create_flag_param(lsm_flag flags) { std::map < std::string, Value > p; p["flags"] = Value(flags); return Value(p); } int lsm_plugin_info_get(lsm_connect * c, char **desc, char **version, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } if (CHECK_RP(desc) || CHECK_RP(version)) { return LSM_ERR_INVALID_ARGUMENT; } try { Value parameters = _create_flag_param(flags); Value response; rc = rpc(c, "plugin_info", parameters, response); if (rc == LSM_ERR_OK) { std::vector < Value > j = response.asArray(); *desc = strdup(j[0].asC_str()); *version = strdup(j[1].asC_str()); if (!*desc || !*version) { rc = LSM_ERR_NO_MEMORY; free(*desc); free(*version); } } } catch(const ValueException & ve) { free(*desc); *desc = NULL; free(*version); *version = NULL; rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_available_plugins_list(const char *sep, lsm_string_list ** plugins, lsm_flag flags) { int rc = LSM_ERR_OK; DIR *dirp = NULL; struct dirent *dp = NULL; lsm_connect *c = NULL; lsm_error_ptr e = NULL; char *desc = NULL; char *version = NULL; char *s = NULL; const char *uds_dir = uds_path(); lsm_string_list *plugin_list = NULL; if (CHECK_STR(sep) || CHECK_RP(plugins) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } plugin_list = lsm_string_list_alloc(0); if (!plugin_list) { return LSM_ERR_NO_MEMORY; } dirp = opendir(uds_dir); if (dirp) { for (;;) { dp = readdir(dirp); if (NULL == dp) { break; } // Check to see if we have a socket if (DT_SOCK == dp->d_type) { c = connection_get(); if (c) { rc = driver_load(c, dp->d_name, NULL, 30000, &e, 0, 0); if (LSM_ERR_OK == rc) { // Get the plugin information rc = lsm_plugin_info_get(c, &desc, &version, 0); if (LSM_ERR_OK == rc) { int format = asprintf(&s, "%s%s%s", desc, sep, version); free(desc); desc = NULL; free(version); version = NULL; if (-1 == format) { 
rc = LSM_ERR_NO_MEMORY; break; } rc = lsm_string_list_append(plugin_list, s); free(s); s = NULL; if (LSM_ERR_OK != rc) { break; } } } else { break; } connection_free(c); c = NULL; } } } /* for(;;) */ if (e) { lsm_error_free(e); e = NULL; } if (c) { connection_free(c); c = NULL; } if (-1 == closedir(dirp)) { //log the error rc = LSM_ERR_LIB_BUG; } } else { /* If dirp == NULL */ //Log the error rc = LSM_ERR_LIB_BUG; } if (LSM_ERR_OK == rc) { *plugins = plugin_list; } else { lsm_string_list_free(plugin_list); plugin_list = NULL; } return rc; } int lsm_connect_timeout_set(lsm_connect * c, uint32_t timeout, lsm_flag flags) { CONN_SETUP(c); if (LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["ms"] = Value(timeout); p["flags"] = Value(flags); Value parameters(p); Value response; //No response data needed on set time out. return rpc(c, "time_out_set", parameters, response); } int lsm_connect_timeout_get(lsm_connect * c, uint32_t * timeout, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } try { Value parameters = _create_flag_param(flags); Value response; rc = rpc(c, "time_out_get", parameters, response); if (rc == LSM_ERR_OK) { *timeout = response.asUint32_t(); } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } static int job_status(lsm_connect * c, const char *job, lsm_job_status * status, uint8_t * percentComplete, Value & returned_value, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (!job || !status || !percentComplete) { return LSM_ERR_INVALID_ARGUMENT; } try { std::map < std::string, Value > p; p["job_id"] = Value(job); p["flags"] = Value(flags); Value parameters(p); Value response; rc = rpc(c, "job_status", parameters, response); if (LSM_ERR_OK == rc) { //We get back an array [status, percent, volume] std::vector < Value > j = response.asArray(); 
*status = (lsm_job_status) j[0].asInt32_t(); *percentComplete = (uint8_t) j[1].asUint32_t(); returned_value = j[2]; } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_job_status_get(lsm_connect * c, const char *job_id, lsm_job_status * status, uint8_t * percentComplete, lsm_flag flags) { CONN_SETUP(c); if (LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } Value rv; return job_status(c, job_id, status, percentComplete, rv, flags); } int lsm_job_status_pool_get(lsm_connect * c, const char *job, lsm_job_status * status, uint8_t * percentComplete, lsm_pool ** pool, lsm_flag flags) { Value rv; int rc = LSM_ERR_OK; CONN_SETUP(c); if (CHECK_RP(pool) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } try { rc = job_status(c, job, status, percentComplete, rv, flags); if (LSM_ERR_OK == rc) { if (Value::object_t == rv.valueType()) { *pool = value_to_pool(rv); if (!(*pool)) { rc = LSM_ERR_NO_MEMORY; } } else { *pool = NULL; } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_job_status_volume_get(lsm_connect * c, const char *job, lsm_job_status * status, uint8_t * percentComplete, lsm_volume ** vol, lsm_flag flags) { Value rv; int rc = LSM_ERR_OK; CONN_SETUP(c); if (CHECK_RP(vol) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } try { rc = job_status(c, job, status, percentComplete, rv, flags); if (LSM_ERR_OK == rc) { if (Value::object_t == rv.valueType()) { *vol = value_to_volume(rv); if (!(*vol)) { rc = LSM_ERR_NO_MEMORY; } } else { *vol = NULL; } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_job_status_fs_get(lsm_connect * c, const char *job, lsm_job_status * status, uint8_t * percentComplete, lsm_fs ** fs, lsm_flag flags) { int rc = LSM_ERR_OK; Value rv; if 
(CHECK_RP(fs) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } try { rc = job_status(c, job, status, percentComplete, rv, flags); if (LSM_ERR_OK == rc) { if (Value::object_t == rv.valueType()) { *fs = value_to_fs(rv); if (!(*fs)) { rc = LSM_ERR_NO_MEMORY; } } else { *fs = NULL; } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_job_status_ss_get(lsm_connect * c, const char *job, lsm_job_status * status, uint8_t * percentComplete, lsm_fs_ss ** ss, lsm_flag flags) { int rc = LSM_ERR_OK; Value rv; if (CHECK_RP(ss) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } try { rc = job_status(c, job, status, percentComplete, rv, flags); if (LSM_ERR_OK == rc) { if (Value::object_t == rv.valueType()) { *ss = value_to_ss(rv); if (!(*ss)) { rc = LSM_ERR_NO_MEMORY; } } else { *ss = NULL; } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_job_free(lsm_connect * c, char **job, lsm_flag flags) { CONN_SETUP(c); if (job == NULL || strlen(*job) < 1 || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["job_id"] = Value(*job); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "job_free", parameters, response); if (LSM_ERR_OK == rc) { /* Free the memory for the job id */ free(*job); *job = NULL; } return rc; } int lsm_capabilities(lsm_connect * c, lsm_system * system, lsm_storage_capabilities ** cap, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (!LSM_IS_SYSTEM(system) || CHECK_RP(cap) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["system"] = system_to_value(system); p["flags"] = Value(flags); Value parameters(p); Value response; try { rc = rpc(c, "capabilities", parameters, response); if (LSM_ERR_OK == rc && Value::object_t == 
response.valueType()) { *cap = value_to_capabilities(response); if (!(*cap)) { rc = LSM_ERR_NO_MEMORY; } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_pool_list(lsm_connect * c, char *search_key, char *search_value, lsm_pool ** poolArray[], uint32_t * count, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (!poolArray || !count || CHECK_RP(poolArray)) { return LSM_ERR_INVALID_ARGUMENT; } *count = 0; *poolArray = NULL; try { std::map < std::string, Value > p; rc = add_search_params(p, search_key, search_value, POOL_SEARCH_KEYS, POOL_SEARCH_KEYS_COUNT); if (LSM_ERR_OK != rc) { return rc; } p["flags"] = Value(flags); Value parameters(p); Value response; rc = rpc(c, "pools", parameters, response); if (LSM_ERR_OK == rc && Value::array_t == response.valueType()) { std::vector < Value > pools = response.asArray(); *count = pools.size(); if (pools.size()) { *poolArray = lsm_pool_record_array_alloc(pools.size()); for (size_t i = 0; i < pools.size(); ++i) { (*poolArray)[i] = value_to_pool(pools[i]); if (!(*poolArray)[i]) { rc = LSM_ERR_NO_MEMORY; goto error; } } } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); goto error; } out: return rc; error: if (*poolArray && *count) { lsm_pool_record_array_free(*poolArray, *count); *poolArray = NULL; *count = 0; } goto out; } int lsm_pool_member_info(lsm_connect * c, lsm_pool * pool, lsm_volume_raid_type * raid_type, lsm_pool_member_type * member_type, lsm_string_list ** member_ids, lsm_flag flags) { if (LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } int rc = LSM_ERR_OK; CONN_SETUP(c); if (!LSM_IS_POOL(pool)) { return LSM_ERR_INVALID_ARGUMENT; } if (!raid_type || !member_type || !member_ids) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["pool"] = pool_to_value(pool); p["flags"] = Value(flags); Value parameters(p); try { Value 
response; rc = rpc(c, "pool_member_info", parameters, response); if (LSM_ERR_OK == rc) { std::vector < Value > j = response.asArray(); *raid_type = (lsm_volume_raid_type) j[0].asInt32_t(); *member_type = (lsm_pool_member_type) j[1].asInt32_t(); *member_ids = NULL; if (Value::array_t == j[2].valueType()) { if (j[2].asArray().size()) { *member_ids = value_to_string_list(j[2]); if (*member_ids == NULL) { return LSM_ERR_NO_MEMORY; } else if (lsm_string_list_size(*member_ids) != j[2].asArray().size()) { lsm_string_list_free(*member_ids); return LSM_ERR_NO_MEMORY; } } } else { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "member_ids data is not an array", "member_ids data is not an array"); } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_target_port_list(lsm_connect * c, const char *search_key, const char *search_value, lsm_target_port ** target_ports[], uint32_t * count, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (!target_ports || !count || CHECK_RP(target_ports)) { return LSM_ERR_INVALID_ARGUMENT; } try { std::map < std::string, Value > p; rc = add_search_params(p, search_key, search_value, TARGET_PORT_SEARCH_KEYS, TARGET_PORT_SEARCH_KEYS_COUNT); if (LSM_ERR_OK != rc) { return rc; } p["flags"] = Value(flags); Value parameters(p); Value response; rc = rpc(c, "target_ports", parameters, response); if (LSM_ERR_OK == rc && Value::array_t == response.valueType()) { std::vector < Value > tp = response.asArray(); *count = tp.size(); if (tp.size()) { *target_ports = lsm_target_port_record_array_alloc(tp.size()); for (size_t i = 0; i < tp.size(); ++i) { (*target_ports)[i] = value_to_target_port(tp[i]); if (!((*target_ports)[i])) { rc = LSM_ERR_NO_MEMORY; goto error; } } } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); goto error; } out: return rc; error: if (*target_ports && *count) { 
lsm_target_port_record_array_free(*target_ports, *count); *target_ports = NULL; *count = 0; } goto out; } static int get_volume_array(lsm_connect * c, int rc, Value & response, lsm_volume ** volumes[], uint32_t * count) { if (LSM_ERR_OK == rc && Value::array_t == response.valueType()) { try { rc = value_array_to_volumes(response, volumes, count); } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Wrong type", ve.what()); } } return rc; } int lsm_volume_list(lsm_connect * c, const char *search_key, const char *search_value, lsm_volume ** volumes[], uint32_t * count, lsm_flag flags) { CONN_SETUP(c); if (!volumes || !count || CHECK_RP(volumes)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["flags"] = Value(flags); int rc = add_search_params(p, search_key, search_value, VOLUME_SEARCH_KEYS, VOLUME_SEARCH_KEYS_COUNT); if (LSM_ERR_OK != rc) { return rc; } Value parameters(p); Value response; rc = rpc(c, "volumes", parameters, response); return get_volume_array(c, rc, response, volumes, count); } static int get_disk_array(lsm_connect * c, int rc, Value & response, lsm_disk ** disks[], uint32_t * count) { if (LSM_ERR_OK == rc && Value::array_t == response.valueType()) { rc = value_array_to_disks(response, disks, count); if (LSM_ERR_OK != rc) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", NULL); } } return rc; } int lsm_disk_list(lsm_connect * c, const char *search_key, const char *search_value, lsm_disk ** disks[], uint32_t * count, lsm_flag flags) { CONN_SETUP(c); if (CHECK_RP(disks) || !count) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["flags"] = Value(flags); int rc = add_search_params(p, search_key, search_value, DISK_SEARCH_KEYS, DISK_SEARCH_KEYS_COUNT); if (LSM_ERR_OK != rc) { return rc; } Value parameters(p); Value response; rc = rpc(c, "disks", parameters, response); return get_disk_array(c, rc, response, disks, count); } typedef void *(*convert) (Value & v); 
static void *parse_job_response(lsm_connect * c, Value response, int &rc, char **job, convert conv) { void *val = NULL; *job = NULL; try { //We get an array back. first value is job, second is data of interest. if (Value::array_t == response.valueType()) { std::vector < Value > r = response.asArray(); if (Value::string_t == r[0].valueType()) { *job = strdup((r[0].asString()).c_str()); if (!(*job)) { rc = LSM_ERR_NO_MEMORY; goto error; } rc = LSM_ERR_JOB_STARTED; } if (Value::object_t == r[1].valueType()) { val = conv(r[1]); if (!val) { rc = LSM_ERR_NO_MEMORY; goto error; } } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); goto error; } out: return val; error: free(*job); *job = NULL; goto out; } int lsm_volume_create(lsm_connect * c, lsm_pool * pool, const char *volumeName, uint64_t size, lsm_volume_provision_type provisioning, lsm_volume ** newVolume, char **job, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_POOL(pool)) { return LSM_ERR_INVALID_ARGUMENT; } if (CHECK_STR(volumeName) || !size || CHECK_RP(newVolume) || CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["pool"] = pool_to_value(pool); p["volume_name"] = Value(volumeName); p["size_bytes"] = Value(size); p["provisioning"] = Value((int32_t) provisioning); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "volume_create", parameters, response); if (LSM_ERR_OK == rc) { *newVolume = (lsm_volume *) parse_job_response(c, response, rc, job, (convert) value_to_volume); } return rc; } int lsm_volume_resize(lsm_connect * c, lsm_volume * volume, uint64_t newSize, lsm_volume ** resizedVolume, char **job, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_VOL(volume) || !newSize || CHECK_RP(resizedVolume) || CHECK_RP(job) || newSize == 0 || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } //If you try to resize to same size, we will return 
error. if ((newSize / volume->block_size) == volume->number_of_blocks) { return LSM_ERR_NO_STATE_CHANGE; } std::map < std::string, Value > p; p["volume"] = volume_to_value(volume); p["new_size_bytes"] = Value(newSize); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "volume_resize", parameters, response); if (LSM_ERR_OK == rc) { *resizedVolume = (lsm_volume *) parse_job_response(c, response, rc, job, (convert) value_to_volume); } return rc; } int lsm_volume_replicate(lsm_connect * c, lsm_pool * pool, lsm_replication_type repType, lsm_volume * volumeSrc, const char *name, lsm_volume ** newReplicant, char **job, lsm_flag flags) { CONN_SETUP(c); if ((pool && !LSM_IS_POOL(pool)) || !LSM_IS_VOL(volumeSrc)) { return LSM_ERR_INVALID_ARGUMENT; } if (CHECK_STR(name) || CHECK_RP(newReplicant) || CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["pool"] = pool_to_value(pool); p["rep_type"] = Value((int32_t) repType); p["volume_src"] = volume_to_value(volumeSrc); p["name"] = Value(name); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "volume_replicate", parameters, response); if (LSM_ERR_OK == rc) { *newReplicant = (lsm_volume *) parse_job_response(c, response, rc, job, (convert) value_to_volume); } return rc; } int lsm_volume_replicate_range_block_size(lsm_connect * c, lsm_system * system, uint32_t * bs, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (!bs || LSM_FLAG_UNUSED_CHECK(flags) || !LSM_IS_SYSTEM(system)) { return LSM_ERR_INVALID_ARGUMENT; } try { std::map < std::string, Value > p; p["system"] = system_to_value(system); p["flags"] = Value(flags); Value parameters(p); Value response; rc = rpc(c, "volume_replicate_range_block_size", parameters, response); if (LSM_ERR_OK == rc) { if (Value::numeric_t == response.valueType()) { *bs = response.asUint32_t(); } } } catch(const ValueException & ve) { rc = log_exception(c, 
LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_volume_replicate_range(lsm_connect * c, lsm_replication_type repType, lsm_volume * source, lsm_volume * dest, lsm_block_range ** ranges, uint32_t num_ranges, char **job, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_VOL(source) || !LSM_IS_VOL(dest)) { return LSM_ERR_INVALID_ARGUMENT; } if (!ranges || !num_ranges || CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["rep_type"] = Value((int32_t) repType); p["volume_src"] = volume_to_value(source); p["volume_dest"] = volume_to_value(dest); p["ranges"] = block_range_list_to_value(ranges, num_ranges); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "volume_replicate_range", parameters, response); return job_check(c, rc, response, job); } static Value _create_volume_flag_param(lsm_volume * volume, lsm_flag flags) { std::map < std::string, Value > p; p["volume"] = volume_to_value(volume); p["flags"] = Value(flags); return Value(p); } int lsm_volume_delete(lsm_connect * c, lsm_volume * volume, char **job, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (!LSM_IS_VOL(volume)) { return LSM_ERR_INVALID_ARGUMENT; } if (CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } try { Value parameters = _create_volume_flag_param(volume, flags); Value response; rc = rpc(c, "volume_delete", parameters, response); rc = job_check(c, rc, response, job); } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_volume_raid_info(lsm_connect * c, lsm_volume * volume, lsm_volume_raid_type * raid_type, uint32_t * strip_size, uint32_t * disk_count, uint32_t * min_io_size, uint32_t * opt_io_size, lsm_flag flags) { if (LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } int rc = LSM_ERR_OK; CONN_SETUP(c); if (!LSM_IS_VOL(volume)) { return 
LSM_ERR_INVALID_ARGUMENT; } if (!raid_type || !strip_size || !disk_count || !min_io_size || !opt_io_size) { return LSM_ERR_INVALID_ARGUMENT; } try { Value parameters = _create_volume_flag_param(volume, flags); Value response; rc = rpc(c, "volume_raid_info", parameters, response); if (LSM_ERR_OK == rc) { //We get a value back, either null or job id. std::vector < Value > j = response.asArray(); *raid_type = (lsm_volume_raid_type) j[0].asInt32_t(); *strip_size = j[1].asUint32_t(); *disk_count = j[2].asUint32_t(); *min_io_size = j[3].asUint32_t(); *opt_io_size = j[4].asUint32_t(); } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_iscsi_chap_auth(lsm_connect * c, const char *init_id, const char *username, const char *password, const char *out_user, const char *out_password, lsm_flag flags) { CONN_SETUP(c); if (iqn_validate(init_id) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["init_id"] = Value(init_id); p["in_user"] = Value(username); p["in_password"] = Value(password); p["out_user"] = Value(out_user); p["out_password"] = Value(out_password); p["flags"] = Value(flags); Value parameters(p); Value response; return rpc(c, "iscsi_chap_auth", parameters, response); } static int online_offline(lsm_connect * c, lsm_volume * v, const char *operation, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_VOL(v)) { return LSM_ERR_INVALID_ARGUMENT; } if (LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["volume"] = volume_to_value(v); p["flags"] = Value(flags); Value parameters(p); Value response; return rpc(c, operation, parameters, response); } int lsm_volume_enable(lsm_connect * c, lsm_volume * volume, lsm_flag flags) { return online_offline(c, volume, "volume_enable", flags); } int lsm_volume_disable(lsm_connect * c, lsm_volume * volume, lsm_flag flags) { return online_offline(c, 
volume, "volume_disable", flags); } int lsm_access_group_list(lsm_connect * c, const char *search_key, const char *search_value, lsm_access_group ** groups[], uint32_t * groupCount, lsm_flag flags) { CONN_SETUP(c); if (!groups || !groupCount) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; int rc = add_search_params(p, search_key, search_value, ACCESS_GROUP_SEARCH_KEYS, ACCESS_GROUP_SEARCH_KEYS_COUNT); if (LSM_ERR_OK != rc) { return rc; } p["flags"] = Value(flags); Value parameters(p); Value response; rc = rpc(c, "access_groups", parameters, response); return get_access_groups(c, rc, response, groups, groupCount); } int lsm_access_group_create(lsm_connect * c, const char *name, const char *init_id, lsm_access_group_init_type init_type, lsm_system * system, lsm_access_group ** access_group, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_SYSTEM(system) || CHECK_STR(name) || CHECK_STR(init_id) || CHECK_RP(access_group) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } Value id; if (LSM_ERR_OK != verify_initiator_id(init_id, init_type, id)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["name"] = Value(name); p["init_id"] = id; p["init_type"] = Value((int32_t) init_type); p["system"] = system_to_value(system); p["flags"] = Value(flags); Value parameters(p); Value response; *access_group = NULL; int rc = rpc(c, "access_group_create", parameters, response); try { if (LSM_ERR_OK == rc) { //We should be getting a value back. 
if (Value::object_t == response.valueType()) { *access_group = value_to_access_group(response); if (!(*access_group)) { rc = LSM_ERR_NO_MEMORY; } } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_access_group_delete(lsm_connect * c, lsm_access_group * access_group, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_ACCESS_GROUP(access_group) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["access_group"] = access_group_to_value(access_group); p["flags"] = Value(flags); Value parameters(p); Value response; return rpc(c, "access_group_delete", parameters, response); } static int _lsm_ag_add_delete(lsm_connect * c, lsm_access_group * access_group, const char *init_id, lsm_access_group_init_type init_type, lsm_access_group ** updated_access_group, lsm_flag flags, const char *message) { CONN_SETUP(c); if (!LSM_IS_ACCESS_GROUP(access_group) || CHECK_STR(init_id) || LSM_FLAG_UNUSED_CHECK(flags) || CHECK_RP(updated_access_group)) { return LSM_ERR_INVALID_ARGUMENT; } Value id; if (LSM_ERR_OK != verify_initiator_id(init_id, init_type, id)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["access_group"] = access_group_to_value(access_group); p["init_id"] = id; p["init_type"] = Value((int32_t) init_type); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, message, parameters, response); try { if (LSM_ERR_OK == rc) { //We should be getting a value back. 
if (Value::object_t == response.valueType()) { *updated_access_group = value_to_access_group(response); if (!(*updated_access_group)) { rc = LSM_ERR_NO_MEMORY; } } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_access_group_initiator_add(lsm_connect * c, lsm_access_group * access_group, const char *init_id, lsm_access_group_init_type init_type, lsm_access_group ** updated_access_group, lsm_flag flags) { return _lsm_ag_add_delete(c, access_group, init_id, init_type, updated_access_group, flags, "access_group_initiator_add"); } int lsm_access_group_initiator_delete(lsm_connect * c, lsm_access_group * access_group, const char *init_id, lsm_access_group_init_type init_type, lsm_access_group ** updated_access_group, lsm_flag flags) { return _lsm_ag_add_delete(c, access_group, init_id, init_type, updated_access_group, flags, "access_group_initiator_delete"); } int lsm_volume_mask(lsm_connect * c, lsm_access_group * access_group, lsm_volume * volume, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_ACCESS_GROUP(access_group) || !LSM_IS_VOL(volume) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["access_group"] = access_group_to_value(access_group); p["volume"] = volume_to_value(volume); p["flags"] = Value(flags); Value parameters(p); Value response; return rpc(c, "volume_mask", parameters, response); } int lsm_volume_unmask(lsm_connect * c, lsm_access_group * group, lsm_volume * volume, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_ACCESS_GROUP(group) || !LSM_IS_VOL(volume) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["access_group"] = access_group_to_value(group); p["volume"] = volume_to_value(volume); p["flags"] = Value(flags); Value parameters(p); Value response; return rpc(c, "volume_unmask", parameters, response); } int 
lsm_volumes_accessible_by_access_group(lsm_connect * c, lsm_access_group * group, lsm_volume ** volumes[], uint32_t * count, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (!LSM_IS_ACCESS_GROUP(group) || !volumes || !count || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } try { std::map < std::string, Value > p; p["access_group"] = access_group_to_value(group); p["flags"] = Value(flags); Value parameters(p); Value response; rc = rpc(c, "volumes_accessible_by_access_group", parameters, response); if (LSM_ERR_OK == rc && Value::array_t == response.valueType()) { std::vector < Value > vol = response.asArray(); *count = vol.size(); if (vol.size()) { *volumes = lsm_volume_record_array_alloc(vol.size()); if (*volumes) { for (size_t i = 0; i < vol.size(); ++i) { (*volumes)[i] = value_to_volume(vol[i]); if (!((*volumes)[i])) { rc = LSM_ERR_NO_MEMORY; goto error; } } } else { rc = LSM_ERR_NO_MEMORY; } } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); goto error; } out: return rc; error: if (*volumes && *count) { lsm_volume_record_array_free(*volumes, *count); *volumes = NULL; *count = 0; } goto out; } int lsm_access_groups_granted_to_volume(lsm_connect * c, lsm_volume * volume, lsm_access_group ** groups[], uint32_t * groupCount, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_VOL(volume)) { return LSM_ERR_INVALID_ARGUMENT; } if (!groups || !groupCount || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["volume"] = volume_to_value(volume); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "access_groups_granted_to_volume", parameters, response); return get_access_groups(c, rc, response, groups, groupCount); } static int _retrieve_bool(int rc, Value & response, uint8_t * yes) { int rc_out = rc; *yes = 0; if (LSM_ERR_OK == rc) { //We should be getting a boolean value back. 
if (Value::boolean_t == response.valueType()) { if (response.asBool()) { *yes = 1; } } else { rc_out = LSM_ERR_PLUGIN_BUG; } } return rc_out; } int lsm_volume_child_dependency(lsm_connect * c, lsm_volume * volume, uint8_t * yes, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (!LSM_IS_VOL(volume)) { return LSM_ERR_INVALID_ARGUMENT; } if (!yes || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } try { Value parameters = _create_volume_flag_param(volume, flags); Value response; rc = rpc(c, "volume_child_dependency", parameters, response); rc = _retrieve_bool(rc, response, yes); } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_volume_child_dependency_delete(lsm_connect * c, lsm_volume * volume, char **job, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_VOL(volume) || CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } Value parameters = _create_volume_flag_param(volume, flags); Value response; int rc = rpc(c, "volume_child_dependency_rm", parameters, response); return job_check(c, rc, response, job); } int lsm_system_list(lsm_connect * c, lsm_system ** systems[], uint32_t * systemCount, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (!systems || !systemCount) { return LSM_ERR_INVALID_ARGUMENT; } try { std::map < std::string, Value > p; p["flags"] = Value(flags); Value parameters(p); Value response; rc = rpc(c, "systems", parameters, response); if (LSM_ERR_OK == rc && Value::array_t == response.valueType()) { std::vector < Value > sys = response.asArray(); *systemCount = sys.size(); if (sys.size()) { *systems = lsm_system_record_array_alloc(sys.size()); if (*systems) { for (size_t i = 0; i < sys.size(); ++i) { (*systems)[i] = value_to_system(sys[i]); if (!(*systems)[i]) { rc = LSM_ERR_NO_MEMORY; goto error; } } } else { rc = LSM_ERR_NO_MEMORY; } } } } catch(const ValueException & ve) { rc = log_exception(c, 
LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); goto error; } out: return rc; error: if (*systems) { lsm_system_record_array_free(*systems, *systemCount); *systems = NULL; *systemCount = 0; } goto out; } int lsm_fs_list(lsm_connect * c, const char *search_key, const char *search_value, lsm_fs ** fs[], uint32_t * fsCount, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (!fs || !fsCount) { return LSM_ERR_INVALID_ARGUMENT; } try { std::map < std::string, Value > p; int rc = add_search_params(p, search_key, search_value, FS_SEARCH_KEYS, FS_SEARCH_KEYS_COUNT); if (LSM_ERR_OK != rc) { return rc; } p["flags"] = Value(flags); Value parameters(p); Value response; rc = rpc(c, "fs", parameters, response); if (LSM_ERR_OK == rc && Value::array_t == response.valueType()) { std::vector < Value > sys = response.asArray(); *fsCount = sys.size(); if (sys.size()) { *fs = lsm_fs_record_array_alloc(sys.size()); if (*fs) { for (size_t i = 0; i < sys.size(); ++i) { (*fs)[i] = value_to_fs(sys[i]); if (!((*fs)[i])) { rc = LSM_ERR_NO_MEMORY; goto error; } } } else { rc = LSM_ERR_NO_MEMORY; } } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); goto error; } out: return rc; error: if (*fs && *fsCount) { lsm_fs_record_array_free(*fs, *fsCount); *fs = NULL; *fsCount = 0; } goto out; } int lsm_fs_create(lsm_connect * c, lsm_pool * pool, const char *name, uint64_t size_bytes, lsm_fs ** fs, char **job, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_POOL(pool)) { return LSM_ERR_INVALID_ARGUMENT; } if (CHECK_STR(name) || !size_bytes || CHECK_RP(fs) || CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["pool"] = pool_to_value(pool); p["name"] = Value(name); p["size_bytes"] = Value(size_bytes); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "fs_create", parameters, response); if (LSM_ERR_OK == rc) { *fs = (lsm_fs *) 
parse_job_response(c, response, rc, job, (convert) value_to_fs); } return rc; } static Value _create_fs_flag_param(lsm_fs * fs, lsm_flag flags) { std::map < std::string, Value > p; p["fs"] = fs_to_value(fs); p["flags"] = Value(flags); return Value(p); } int lsm_fs_delete(lsm_connect * c, lsm_fs * fs, char **job, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_FS(fs) || CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } Value parameters = _create_fs_flag_param(fs, flags); Value response; int rc = rpc(c, "fs_delete", parameters, response); return job_check(c, rc, response, job); } int lsm_fs_resize(lsm_connect * c, lsm_fs * fs, uint64_t new_size_bytes, lsm_fs ** rfs, char **job, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_FS(fs) || !new_size_bytes || CHECK_RP(rfs) || CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["fs"] = fs_to_value(fs); p["new_size_bytes"] = Value(new_size_bytes); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "fs_resize", parameters, response); if (LSM_ERR_OK == rc) { *rfs = (lsm_fs *) parse_job_response(c, response, rc, job, (convert) value_to_fs); } return rc; } int lsm_fs_clone(lsm_connect * c, lsm_fs * src_fs, const char *name, lsm_fs_ss * optional_ss, lsm_fs ** cloned_fs, char **job, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_FS(src_fs) || CHECK_STR(name) || CHECK_RP(cloned_fs) || CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["src_fs"] = fs_to_value(src_fs); p["dest_fs_name"] = Value(name); p["snapshot"] = ss_to_value(optional_ss); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "fs_clone", parameters, response); if (LSM_ERR_OK == rc) { *cloned_fs = (lsm_fs *) parse_job_response(c, response, rc, job, (convert) value_to_fs); } return rc; } int lsm_fs_file_clone(lsm_connect * c, lsm_fs * fs, const char 
*src_file_name, const char *dest_file_name, lsm_fs_ss * snapshot, char **job, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_FS(fs) || CHECK_STR(src_file_name) || CHECK_STR(dest_file_name) || CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["fs"] = fs_to_value(fs); p["src_file_name"] = Value(src_file_name); p["dest_file_name"] = Value(dest_file_name); p["snapshot"] = ss_to_value(snapshot); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "fs_file_clone", parameters, response); return job_check(c, rc, response, job); } static Value _create_fs_file_flag_params(lsm_fs * fs, lsm_string_list * files, lsm_flag flags) { std::map < std::string, Value > p; p["fs"] = fs_to_value(fs); p["files"] = string_list_to_value(files); p["flags"] = Value(flags); return Value(p); } int lsm_fs_child_dependency(lsm_connect * c, lsm_fs * fs, lsm_string_list * files, uint8_t * yes, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (!LSM_IS_FS(fs)) { return LSM_ERR_INVALID_ARGUMENT; } if (files) { if (!LSM_IS_STRING_LIST(files)) { return LSM_ERR_INVALID_ARGUMENT; } } if (!yes || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } try { Value parameters = _create_fs_file_flag_params(fs, files, flags); Value response; rc = rpc(c, "fs_child_dependency", parameters, response); rc = _retrieve_bool(rc, response, yes); } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_fs_child_dependency_delete(lsm_connect * c, lsm_fs * fs, lsm_string_list * files, char **job, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_FS(fs)) { return LSM_ERR_INVALID_ARGUMENT; } if (files) { if (!LSM_IS_STRING_LIST(files)) { return LSM_ERR_INVALID_ARGUMENT; } } if (CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } Value parameters = _create_fs_file_flag_params(fs, files, flags); Value 
response; int rc = rpc(c, "fs_child_dependency_rm", parameters, response); return job_check(c, rc, response, job); } int lsm_fs_ss_list(lsm_connect * c, lsm_fs * fs, lsm_fs_ss ** ss[], uint32_t * ssCount, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (!LSM_IS_FS(fs)) { return LSM_ERR_INVALID_ARGUMENT; } if (CHECK_RP(ss) || !ssCount) { return LSM_ERR_INVALID_ARGUMENT; } Value parameters = _create_fs_flag_param(fs, flags); Value response; try { rc = rpc(c, "fs_snapshots", parameters, response); if (LSM_ERR_OK == rc && Value::array_t == response.valueType()) { std::vector < Value > sys = response.asArray(); *ssCount = sys.size(); if (sys.size()) { *ss = lsm_fs_ss_record_array_alloc(sys.size()); if (*ss) { for (size_t i = 0; i < sys.size(); ++i) { (*ss)[i] = value_to_ss(sys[i]); if (!((*ss)[i])) { rc = LSM_ERR_NO_MEMORY; goto error; } } } else { rc = LSM_ERR_NO_MEMORY; } } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); goto error; } out: return rc; error: if (*ss && *ssCount) { lsm_fs_ss_record_array_free(*ss, *ssCount); *ss = NULL; *ssCount = 0; } goto out; } int lsm_fs_ss_create(lsm_connect * c, lsm_fs * fs, const char *name, lsm_fs_ss ** snapshot, char **job, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_FS(fs)) { return LSM_ERR_INVALID_ARGUMENT; } if (CHECK_STR(name) || CHECK_RP(snapshot) || CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["fs"] = fs_to_value(fs); p["snapshot_name"] = Value(name); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "fs_snapshot_create", parameters, response); if (LSM_ERR_OK == rc) { *snapshot = (lsm_fs_ss *) parse_job_response(c, response, rc, job, (convert) value_to_ss); } return rc; } int lsm_fs_ss_delete(lsm_connect * c, lsm_fs * fs, lsm_fs_ss * ss, char **job, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_FS(fs)) { return LSM_ERR_INVALID_ARGUMENT; } if 
(!LSM_IS_SS(ss)) { return LSM_ERR_INVALID_ARGUMENT; } if (CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["fs"] = fs_to_value(fs); p["snapshot"] = ss_to_value(ss); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "fs_snapshot_delete", parameters, response); return job_check(c, rc, response, job); } int lsm_fs_ss_restore(lsm_connect * c, lsm_fs * fs, lsm_fs_ss * ss, lsm_string_list * files, lsm_string_list * restore_files, int all_files, char **job, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_FS(fs)) { return LSM_ERR_INVALID_ARGUMENT; } if (!LSM_IS_SS(ss)) { return LSM_ERR_INVALID_ARGUMENT; } if (files) { if (!LSM_IS_STRING_LIST(files)) { return LSM_ERR_INVALID_ARGUMENT; } } if (restore_files) { if (!LSM_IS_STRING_LIST(restore_files)) { return LSM_ERR_INVALID_ARGUMENT; } } if (CHECK_RP(job) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["fs"] = fs_to_value(fs); p["snapshot"] = ss_to_value(ss); p["files"] = string_list_to_value(files); p["restore_files"] = string_list_to_value(restore_files); p["all_files"] = Value((all_files) ? 
true : false); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "fs_snapshot_restore", parameters, response); return job_check(c, rc, response, job); } int lsm_nfs_list(lsm_connect * c, const char *search_key, const char *search_value, lsm_nfs_export ** exports[], uint32_t * count, lsm_flag flags) { int rc = LSM_ERR_OK; CONN_SETUP(c); if (CHECK_RP(exports) || !count) { return LSM_ERR_INVALID_ARGUMENT; } *count = 0; *exports = NULL; try { std::map < std::string, Value > p; rc = add_search_params(p, search_key, search_value, NFS_EXPORT_SEARCH_KEYS, NFS_EXPORT_SEARCH_KEYS_COUNT); if (LSM_ERR_OK != rc) { return rc; } p["flags"] = Value(flags); Value parameters(p); Value response; rc = rpc(c, "exports", parameters, response); if (LSM_ERR_OK == rc && Value::array_t == response.valueType()) { std::vector < Value > exps = response.asArray(); *count = exps.size(); if (*count) { *exports = lsm_nfs_export_record_array_alloc(*count); if (*exports) { for (size_t i = 0; i < *count; ++i) { (*exports)[i] = value_to_nfs_export(exps[i]); if (!((*exports)[i])) { rc = LSM_ERR_NO_MEMORY; goto error; } } } else { rc = LSM_ERR_NO_MEMORY; } } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); goto error; } out: return rc; error: if (*exports && *count) { lsm_nfs_export_record_array_free(*exports, *count); *exports = NULL; *count = 0; } goto out; } int lsm_nfs_export_fs(lsm_connect * c, const char *fs_id, const char *export_path, lsm_string_list * root_list, lsm_string_list * rw_list, lsm_string_list * ro_list, uint64_t anon_uid, uint64_t anon_gid, const char *auth_type, const char *options, lsm_nfs_export ** exported, lsm_flag flags) { CONN_SETUP(c); if (root_list) { if (!LSM_IS_STRING_LIST(root_list)) { return LSM_ERR_INVALID_ARGUMENT; } } if (rw_list) { if (!LSM_IS_STRING_LIST(rw_list)) { return LSM_ERR_INVALID_ARGUMENT; } } if (ro_list) { if (!LSM_IS_STRING_LIST(ro_list)) { return 
LSM_ERR_INVALID_ARGUMENT; } } if (CHECK_STR(fs_id) || CHECK_RP(exported) || !(root_list || rw_list || ro_list) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["fs_id"] = Value(fs_id); p["export_path"] = Value(export_path); p["root_list"] = string_list_to_value(root_list); p["rw_list"] = string_list_to_value(rw_list); p["ro_list"] = string_list_to_value(ro_list); p["anon_uid"] = Value(anon_uid); p["anon_gid"] = Value(anon_gid); p["auth_type"] = Value(auth_type); p["options"] = Value(options); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "export_fs", parameters, response); try { if (LSM_ERR_OK == rc && Value::object_t == response.valueType()) { *exported = value_to_nfs_export(response); if (!(*exported)) { rc = LSM_ERR_NO_MEMORY; } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } int lsm_nfs_export_delete(lsm_connect * c, lsm_nfs_export * e, lsm_flag flags) { CONN_SETUP(c); if (!LSM_IS_NFS_EXPORT(e) || LSM_FLAG_UNUSED_CHECK(flags)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["export"] = nfs_export_to_value(e); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "export_remove", parameters, response); return rc; } int lsm_volume_raid_create_cap_get(lsm_connect * c, lsm_system * system, uint32_t ** supported_raid_types, uint32_t * supported_raid_type_count, uint32_t ** supported_strip_sizes, uint32_t * supported_strip_size_count, lsm_flag flags) { CONN_SETUP(c); if (!supported_raid_types || !supported_raid_type_count || !supported_strip_sizes || !supported_strip_size_count) { return LSM_ERR_INVALID_ARGUMENT; } *supported_raid_types = NULL; std::map < std::string, Value > p; p["system"] = system_to_value(system); p["flags"] = Value(flags); Value parameters(p); Value response; int rc = rpc(c, "volume_raid_create_cap_get", parameters, response); 
try { std::vector < Value > j = response.asArray(); rc = values_to_uint32_array(j[0], supported_raid_types, supported_raid_type_count); if (rc != LSM_ERR_OK) { goto error; } rc = values_to_uint32_array(j[1], supported_strip_sizes, supported_strip_size_count); if (rc != LSM_ERR_OK) { goto error; } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } out: return rc; error: free(*supported_raid_types); *supported_raid_types = NULL; *supported_raid_type_count = 0; *supported_strip_sizes = NULL; *supported_strip_size_count = 0; goto out; } int lsm_volume_raid_create(lsm_connect * c, const char *name, lsm_volume_raid_type raid_type, lsm_disk * disks[], uint32_t disk_count, uint32_t strip_size, lsm_volume ** new_volume, lsm_flag flags) { CONN_SETUP(c); if (disk_count == 0) { return log_exception(c, LSM_ERR_INVALID_ARGUMENT, "Require at least one disks", NULL); } if (raid_type == LSM_VOLUME_RAID_TYPE_RAID1 && disk_count != 2) { return log_exception(c, LSM_ERR_INVALID_ARGUMENT, "RAID 1 only allows two disks", NULL); } if (raid_type == LSM_VOLUME_RAID_TYPE_RAID5 && disk_count < 3) { return log_exception(c, LSM_ERR_INVALID_ARGUMENT, "RAID 5 require 3 or more disks", NULL); } if (raid_type == LSM_VOLUME_RAID_TYPE_RAID6 && disk_count < 4) { return log_exception(c, LSM_ERR_INVALID_ARGUMENT, "RAID 5 require 4 or more disks", NULL); } if (disk_count % 2) { if (raid_type == LSM_VOLUME_RAID_TYPE_RAID10 && disk_count < 4) { return log_exception(c, LSM_ERR_INVALID_ARGUMENT, "RAID 10 require even disks count and 4 " "or more disks", NULL); } if (raid_type == LSM_VOLUME_RAID_TYPE_RAID50 && disk_count < 6) { return log_exception(c, LSM_ERR_INVALID_ARGUMENT, "RAID 50 require even disks count and 6 or " "more disks", NULL); } if (raid_type == LSM_VOLUME_RAID_TYPE_RAID60 && disk_count < 8) { return log_exception(c, LSM_ERR_INVALID_ARGUMENT, "RAID 60 require even disks count and 8 or " "more disks", NULL); } } if 
(CHECK_RP(new_volume)) { return LSM_ERR_INVALID_ARGUMENT; } std::map < std::string, Value > p; p["name"] = Value(name); p["raid_type"] = Value((int32_t) raid_type); p["strip_size"] = Value((int32_t) strip_size); p["flags"] = Value(flags); std::vector < Value > disks_value; disks_value.reserve(disk_count); for (uint32_t i = 0; i < disk_count; i++) { disks_value.push_back(disk_to_value(disks[i])); } p["disks"] = disks_value; Value parameters(p); Value response; int rc = rpc(c, "volume_raid_create", parameters, response); try { if (LSM_ERR_OK == rc) { *new_volume = value_to_volume(response); if (!(*new_volume)) { rc = LSM_ERR_NO_MEMORY; } } } catch(const ValueException & ve) { rc = log_exception(c, LSM_ERR_PLUGIN_BUG, "Unexpected type", ve.what()); } return rc; } libstoragemgmt-1.2.3/c_binding/Makefile.am0000664000175000017500000000107012537546123015436 00000000000000SUBDIRS = include AM_CPPFLAGS = -I$(top_srcdir)/c_binding/include \ -I$(top_builddir)/c_binding/include \ -I@srcdir@/c_binding/include \ $(LIBXML_CFLAGS) $(LIBGLIB_CFLAGS) lib_LTLIBRARIES = libstoragemgmt.la libstoragemgmt_la_LIBADD=$(LIBXML_LIBS) $(YAJL_LIBS) $(LIBGLIB_LIBS) libstoragemgmt_la_LDFLAGS= -version-info $(LIBSM_LIBTOOL_VERSION) libstoragemgmt_la_SOURCES= \ lsm_mgmt.cpp lsm_datatypes.hpp lsm_datatypes.cpp lsm_convert.hpp \ lsm_convert.cpp lsm_ipc.hpp lsm_ipc.cpp lsm_plugin_ipc.hpp \ lsm_plugin_ipc.cpp util/qparams.c util/qparams.h libstoragemgmt-1.2.3/c_binding/lsm_convert.hpp0000664000175000017500000002306112537737032016453 00000000000000/* * Copyright (C) 2011-2013 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LSM_CONVERT_HPP #define LSM_CONVERT_HPP #include "lsm_datatypes.hpp" #include "lsm_ipc.hpp" /** * Class names for serialized json */ const char CLASS_NAME_SYSTEM[] = "System"; const char CLASS_NAME_POOL[] = "Pool"; const char CLASS_NAME_VOLUME[] = "Volume"; const char CLASS_NAME_BLOCK_RANGE[] = "BlockRange"; const char CLASS_NAME_ACCESS_GROUP[] = "AccessGroup"; const char CLASS_NAME_FILE_SYSTEM[] = "FileSystem"; const char CLASS_NAME_DISK[] = "Disk"; const char CLASS_NAME_FS_SNAPSHOT[] = "FsSnapshot"; const char CLASS_NAME_FS_EXPORT[] = "NfsExport"; const char CLASS_NAME_CAPABILITIES[] = "Capabilities"; const char CLASS_NAME_TARGET_PORT[] = "TargetPort"; #define IS_CLASS(x, name) is_expected_object(x, name) #define IS_CLASS_SYSTEM(x) IS_CLASS(x, CLASS_NAME_SYSTEM) #define IS_CLASS_POOL(x) IS_CLASS(x, CLASS_NAME_POOL) #define IS_CLASS_VOLUME(x) IS_CLASS(x, CLASS_NAME_VOLUME) #define IS_CLASS_BLOCK_RANGE(x) IS_CLASS(x, CLASS_NAME_BLOCK_RANGE) #define IS_CLASS_ACCESS_GROUP(x) IS_CLASS(x, CLASS_NAME_ACCESS_GROUP) #define IS_CLASS_FILE_SYSTEM(x) IS_CLASS(x, CLASS_NAME_FILE_SYSTEM) #define IS_CLASS_FS_SNAPSHOT(x) IS_CLASS(x, CLASS_NAME_FS_SNAPSHOT) #define IS_CLASS_FS_EXPORT(x) IS_CLASS(x, CLASS_NAME_FS_EXPORT) /** * Checks to see if a value is an expected object instance * @param obj Value to check * @param class_name Class name to check * @return boolean, true if matches */ bool LSM_DLL_LOCAL is_expected_object(Value & obj, std::string class_name); /** * Converts an array of Values to a lsm_string_list * @param list List represented as an vector of strings. 
* @return lsm_string_list pointer, NULL on error. */ lsm_string_list LSM_DLL_LOCAL *value_to_string_list(Value & list); /** * Converts a lsm_string_list to a Value * @param sl String list to convert * @return Value */ Value LSM_DLL_LOCAL string_list_to_value(lsm_string_list *sl); /** * Converts a Value to a volume. * @param vol Value to convert. * @return lsm_volume *, else NULL on error */ lsm_volume LSM_DLL_LOCAL *value_to_volume(Value & vol); /** * Converts a lsm_volume * to a Value * @param vol lsm_volume to convert * @return Value */ Value LSM_DLL_LOCAL volume_to_value(lsm_volume *vol); /** * Converts a vector of volume values to an array * @param volume_values Vector of values that represents volumes * @param volumes An array of volume pointers * @param count Number of volumes * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_LOCAL value_array_to_volumes(Value & volume_values, lsm_volume **volumes[], uint32_t * count); /** * Converts a Value to a lsm_disk * @param disk Value representing a disk * @return lsm_disk pointer, else NULL on error */ lsm_disk LSM_DLL_LOCAL *value_to_disk(Value & disk); /** * Converts a lsm_disk to a value * @param disk lsm_disk to convert to value * @return Value */ Value LSM_DLL_LOCAL disk_to_value(lsm_disk * disk); /** * Converts a vector of disk values to an array. * @param[in] disk_values Vector of values that represents disks * @param[out] disks An array of disk pointers * @param[out] count Number of disks * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_LOCAL value_array_to_disks(Value & disk_values, lsm_disk **disks[], uint32_t * count); /** * Converts a value to a pool * @param pool To convert to lsm_pool * * @return lsm_pool *, else NULL on error. 
*/ lsm_pool LSM_DLL_LOCAL *value_to_pool(Value & pool); /** * Converts a lsm_pool * to Value * @param pool Pool pointer to convert * @return Value */ Value LSM_DLL_LOCAL pool_to_value(lsm_pool *pool); /** * Converts a value to a system * @param system to convert to lsm_system * * @return lsm_system pointer, else NULL on error */ lsm_system LSM_DLL_LOCAL *value_to_system(Value & system); /** * Converts a lsm_system * to a Value * @param system pointer to convert to Value * @return Value */ Value LSM_DLL_LOCAL system_to_value(lsm_system *system); /** * Converts a Value to a lsm_access_group * @param group to convert to lsm_access_group* * @return lsm_access_group *, NULL on error */ lsm_access_group LSM_DLL_LOCAL *value_to_access_group(Value & group); /** * Converts a lsm_access_group to a Value * @param group Group to convert * @return Value, null value type on error. */ Value LSM_DLL_LOCAL access_group_to_value(lsm_access_group *group); /** * Converts an access group list to an array of access group pointers * @param[in] group Value representing a std::vector of access groups * @param[out] * @param[out] count Number of items in the returned array. * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_LOCAL value_array_to_access_groups(Value & group, lsm_access_group **ag_list[], uint32_t * count); /** * Converts an array of lsm_access_group to Value(s) * @param group Pointer to an array of lsm_access_group * @param count Number of items in array. 
* @return std::vector of Values representing access groups */ Value LSM_DLL_LOCAL access_group_list_to_value(lsm_access_group **group, uint32_t count); /** * Converts a Value to a lsm_block_range * @param br Value representing a block range * @return lsm_block_range * */ lsm_block_range LSM_DLL_LOCAL *value_to_block_range(Value & br); /** * Converts a lsm_block_range to a Value * @param br lsm_block_range to convert * @return Value, null value type on error */ Value LSM_DLL_LOCAL block_range_to_value(lsm_block_range *br); /** * Converts a Value to an array of lsm_block_range * @param[in] brl Value representing block range(s) * @param[out] count Number of items in the resulting array * @return NULL on memory allocation failure, else array of lsm_block_range */ lsm_block_range LSM_DLL_LOCAL **value_to_block_range_list(Value & brl, uint32_t *count); /** * Converts an array of lsm_block_range to Value * @param brl An array of lsm_block_range * @param count Number of items in input * @return Value */ Value LSM_DLL_LOCAL block_range_list_to_value(lsm_block_range **brl, uint32_t count); /** * Converts a value to a lsm_fs * * @param fs Value representing a FS to be converted * @return lsm_fs pointer or NULL on error. */ lsm_fs LSM_DLL_LOCAL *value_to_fs(Value & fs); /** * Converts a lsm_fs pointer to a Value * @param fs File system pointer to convert * @return Value */ Value LSM_DLL_LOCAL fs_to_value(lsm_fs *fs); /** * Converts a value to a lsm_ss * * @param ss Value representing a snapshot to be converted * @return lsm_ss pointer or NULL on error. */ lsm_fs_ss LSM_DLL_LOCAL *value_to_ss(Value & ss); /** * Converts a lsm_ss pointer to a Value * @param ss Snapshot pointer to convert * @return Value */ Value LSM_DLL_LOCAL ss_to_value(lsm_fs_ss *ss); /** * Converts a value to a lsm_nfs_export * * @param exp Value representing a nfs export to be converted * @return lsm_nfs_export pointer or NULL on error. 
*/ lsm_nfs_export LSM_DLL_LOCAL *value_to_nfs_export(Value & exp); /** * Converts a lsm_nfs_export pointer to a Value * @param exp NFS export pointer to convert * @return Value */ Value LSM_DLL_LOCAL nfs_export_to_value(lsm_nfs_export *exp); /** * Converts a Value to a lsm_storage_capabilities * @param exp Value representing a storage capabilities * @return lsm_storage_capabilities pointer or NULL on error */ lsm_storage_capabilities LSM_DLL_LOCAL *value_to_capabilities(Value & exp); /** * Converts a lsm_storage_capabilities to a value * @param cap lsm_storage_capabilities to convert to value * @return Value */ Value LSM_DLL_LOCAL capabilities_to_value(lsm_storage_capabilities *cap); /** * Convert a Value representation to lsm_target_port * @param tp Value to convert to lsm_target_port * @return lsm_target_port pointer or NULL on errors */ lsm_target_port LSM_DLL_LOCAL *value_to_target_port(Value & tp); /** * Converts a lsm_target_port to a value * @param tp lsm_target_port to convert to value * @return Value */ Value LSM_DLL_LOCAL target_port_to_value(lsm_target_port *tp); /** * Converts a value to array of uint32. */ int LSM_DLL_LOCAL values_to_uint32_array(Value & value, uint32_t **uint32_array, uint32_t *count); /** * Converts an array of uint32 to a value. */ Value LSM_DLL_LOCAL uint32_array_to_value(uint32_t *uint32_array, uint32_t count); #endif libstoragemgmt-1.2.3/c_binding/include/0000775000175000017500000000000012542455463015111 500000000000000libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/0000775000175000017500000000000012542455463020131 500000000000000libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_volumes.h0000664000175000017500000000635212537737032025021 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. 
* This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see <http://www.gnu.org/licenses/>. * * Author: tasleson */ #ifndef LIBSTORAGEMGMT_VOLUMES_H #define LIBSTORAGEMGMT_VOLUMES_H #include "libstoragemgmt_common.h" #ifdef __cplusplus extern "C" { #endif /** * Frees the memory for an individual volume * @param v Volume pointer to free. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_volume_record_free(lsm_volume *v); /** * Copies a volume record structure. * @param vol Volume record to be copied. * @return NULL on error, else record copy. */ lsm_volume LSM_DLL_EXPORT *lsm_volume_record_copy(lsm_volume *vol); /** * Frees the memory for each of the volume records and then the array itself. * @param init Array to free. * @param size Size of array. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_volume_record_array_free(lsm_volume *init[], uint32_t size); /** * Retrieves the volume id. * Note: returned value only valid when v is valid! * @param v Volume ptr. * @return Volume id. */ const char LSM_DLL_EXPORT *lsm_volume_id_get(lsm_volume *v); /** * Retrieves the volume name (human recognizable) * Note: returned value only valid when v is valid! * @param v Volume ptr. * @return Volume name */ const char LSM_DLL_EXPORT *lsm_volume_name_get(lsm_volume *v); /** * Retrieves the SCSI page 83 unique ID. * Note: returned value only valid when v is valid! * @param v Volume ptr. * @return SCSI page 83 unique ID. 
*/ const char LSM_DLL_EXPORT *lsm_volume_vpd83_get(lsm_volume *v); /** * Retrieves the volume block size. * @param v Volume ptr. * @return Volume block size. */ uint64_t LSM_DLL_EXPORT lsm_volume_block_size_get(lsm_volume *v); /** * Retrieves the number of blocks. * @param v Volume ptr. * @return Number of blocks. */ uint64_t LSM_DLL_EXPORT lsm_volume_number_of_blocks_get(lsm_volume *v); /** * Retrieves the admin state of the volume. * @param v Volume ptr. * @return Admin state of volume, see LSM_VOLUME_ADMIN_STATE_ENABLED and * LSM_VOLUME_ADMIN_STATE_DISABLED * */ uint32_t LSM_DLL_EXPORT lsm_volume_admin_state_get(lsm_volume *v); /** * Retrieves the system id of the volume. * @param v Volume ptr. * @return System id. */ char LSM_DLL_EXPORT *lsm_volume_system_id_get(lsm_volume *v); /** * Retrieves the pool id that the volume is derived from. * @param v Volume ptr. * @return Pool id. */ char LSM_DLL_EXPORT *lsm_volume_pool_id_get(lsm_volume *v); #ifdef __cplusplus } #endif #endif /* LIBSTORAGEMGMT_VOLUMES_H */ libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_types.h0000664000175000017500000002516512537737032024476 00000000000000/* * Copyright (C) 2011-2013 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . 
* * Author: tasleson */ #ifndef LIBSTORAGEMGMT_TYPES_H #define LIBSTORAGEMGMT_TYPES_H #ifdef __cplusplus #define __STDC_FORMAT_MACROS #define __STDC_LIMIT_MACROS #endif #include <inttypes.h> #ifdef __cplusplus extern "C" { #endif /** @file libstoragemgmt_types.h */ /* Just in case we want to change the flag to a different type */ typedef uint64_t lsm_flag; #define LSM_CLIENT_FLAG_RSVD 0 /** * Opaque data type for a connection. */ typedef struct _lsm_connect lsm_connect; /** * Opaque data type for a block based storage unit */ typedef struct _lsm_volume lsm_volume; /** * Opaque data type for a storage pool which is used as a base for Volumes etc. * to be created from. */ typedef struct _lsm_pool lsm_pool; /** * Opaque data type for an initiator. */ typedef struct _lsm_initiator lsm_initiator; /** * Opaque data type for storage capabilities. */ typedef struct _lsm_storage_capabilities lsm_storage_capabilities; /** * Access group */ typedef struct _lsm_access_group lsm_access_group; /** * Opaque data type for nfs exports */ typedef struct _lsm_nfs_export lsm_nfs_export; /** * Opaque data type for block ranges (regions to replicate) */ typedef struct _lsm_block_range lsm_block_range; /** * Opaque data type for systems. 
*/ typedef struct _lsm_system lsm_system; /** * Opaque data type for string collection */ typedef struct _lsm_string_list lsm_string_list; /** * Opaque data type for file systems */ typedef struct _lsm_fs lsm_fs; /** * Opaque data type for snapshot */ typedef struct _lsm_fs_ss lsm_fs_ss; /** * Opaque data type for disk */ typedef struct _lsm_disk lsm_disk; /** * Optional data type */ typedef struct _lsm_hash lsm_hash; /** * Opaque data type for Target ports */ typedef struct _lsm_target_port lsm_target_port; /**< \enum lsm_replication_type Different types of replications that can be * created */ typedef enum { LSM_VOLUME_REPLICATE_UNKNOWN = -1, /**^ Unknown replicate */ LSM_VOLUME_REPLICATE_CLONE = 2, /**^ Space efficient copy */ LSM_VOLUME_REPLICATE_COPY = 3, /**^ Full bitwise copy */ LSM_VOLUME_REPLICATE_MIRROR_SYNC = 4, /**^ Mirrors always in sync */ LSM_VOLUME_REPLICATE_MIRROR_ASYNC = 5 /**^ Mirror partner updated with delay */ } lsm_replication_type; /**< \enum lsm_volume_provision_type Different types of provisioning */ typedef enum { LSM_VOLUME_PROVISION_UNKNOWN = -1, /**^ Unknown */ LSM_VOLUME_PROVISION_THIN = 1, /**^ Thin provisioning */ LSM_VOLUME_PROVISION_FULL = 2, /**^ Thick provisioning */ LSM_VOLUME_PROVISION_DEFAULT = 3 /**^ Default provisioning */ } lsm_volume_provision_type; /**^ \enum lsm_volume_raid_type Different types of RAID */ typedef enum { LSM_VOLUME_RAID_TYPE_UNKNOWN = -1, /**^ Unknown */ LSM_VOLUME_RAID_TYPE_RAID0 = 0, /**^ Stripe */ LSM_VOLUME_RAID_TYPE_RAID1 = 1, /**^ Mirror between two disks. 
For 4 disks or more, they are RAID10.*/ LSM_VOLUME_RAID_TYPE_RAID3 = 3, /**^ Byte-level striping with dedicated parity */ LSM_VOLUME_RAID_TYPE_RAID4 = 4, /**^ Block-level striping with dedicated parity */ LSM_VOLUME_RAID_TYPE_RAID5 = 5, /**^ Block-level striping with distributed parity */ LSM_VOLUME_RAID_TYPE_RAID6 = 6, /**^ Block-level striping with two distributed parities, aka, RAID-DP */ LSM_VOLUME_RAID_TYPE_RAID10 = 10, /**^ Stripe of mirrors */ LSM_VOLUME_RAID_TYPE_RAID15 = 15, /**^ Parity of mirrors */ LSM_VOLUME_RAID_TYPE_RAID16 = 16, /**^ Dual parity of mirrors */ LSM_VOLUME_RAID_TYPE_RAID50 = 50, /**^ Stripe of parities */ LSM_VOLUME_RAID_TYPE_RAID60 = 60, /**^ Stripe of dual parities */ LSM_VOLUME_RAID_TYPE_RAID51 = 51, /**^ Mirror of parities */ LSM_VOLUME_RAID_TYPE_RAID61 = 61, /**^ Mirror of dual parities */ LSM_VOLUME_RAID_TYPE_JBOD = 20, /**^ Just bunch of disks, no parity, no striping. */ LSM_VOLUME_RAID_TYPE_MIXED = 21, /**^ This volume contains multiple RAID settings. */ LSM_VOLUME_RAID_TYPE_OTHER = 22, /**^ Vendor specific RAID type */ } lsm_volume_raid_type; /**^ \enum lsm_pool_member_type Different types of Pool member*/ typedef enum { LSM_POOL_MEMBER_TYPE_UNKNOWN = 0, /**^ Plugin failed to detect the RAID member type. */ LSM_POOL_MEMBER_TYPE_OTHER = 1, /**^ Vendor specific RAID member type. */ LSM_POOL_MEMBER_TYPE_DISK = 2, /**^ Pool is created from RAID group using whole disks. */ LSM_POOL_MEMBER_TYPE_POOL = 3, /**^ * Current pool(also known as sub-pool) is allocated from other * pool(parent pool). * The 'raid_type' will set to RAID_TYPE_OTHER unless certain RAID system * support RAID using space of parent pools. 
*/ } lsm_pool_member_type; #define LSM_VOLUME_STRIP_SIZE_UNKNOWN 0 #define LSM_VOLUME_DISK_COUNT_UNKNOWN 0 #define LSM_VOLUME_MIN_IO_SIZE_UNKNOWN 0 #define LSM_VOLUME_OPT_IO_SIZE_UNKNOWN 0 /** * Admin state for volume, enabled or disabled */ #define LSM_VOLUME_ADMIN_STATE_ENABLED 0x1 /**^ Volume accessible */ #define LSM_VOLUME_ADMIN_STATE_DISABLED 0x0 /**^ Volume unaccessible */ /** * Different states a system status can be in. * Bit field, can be in multiple states at the same time. */ #define LSM_SYSTEM_STATUS_UNKNOWN 0x00000001 /**^ Unknown */ #define LSM_SYSTEM_STATUS_OK 0x00000002 /**^ OK */ #define LSM_SYSTEM_STATUS_ERROR 0x00000004 /**^ Error(s) exist */ #define LSM_SYSTEM_STATUS_DEGRADED 0x00000008 /**^ Degraded */ #define LSM_SYSTEM_STATUS_PREDICTIVE_FAILURE 0x00000010 /**^ System has predictive failure(s) */ #define LSM_SYSTEM_STATUS_OTHER 0x00000020 /**^ Vendor specific */ typedef enum { LSM_ACCESS_GROUP_INIT_TYPE_UNKNOWN = 0, /**^ Unknown */ LSM_ACCESS_GROUP_INIT_TYPE_OTHER = 1, /**^ Something not seen before */ LSM_ACCESS_GROUP_INIT_TYPE_WWPN = 2, /**^ Port name */ LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN = 5, /**^ ISCSI IQN */ LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_WWPN_MIXED = 7 /**^ More than 1 type */ } lsm_access_group_init_type; /**^ \enum lsm_job_status Job states */ typedef enum { LSM_JOB_INPROGRESS = 1, /**^ Job is in progress */ LSM_JOB_COMPLETE = 2, /**^ Job is complete */ LSM_JOB_ERROR = 3 /**^ Job has errored */ } lsm_job_status; typedef enum { LSM_DISK_TYPE_UNKNOWN = 0, LSM_DISK_TYPE_OTHER = 1, LSM_DISK_TYPE_ATA = 3, LSM_DISK_TYPE_SATA = 4, LSM_DISK_TYPE_SAS = 5, LSM_DISK_TYPE_FC = 6, LSM_DISK_TYPE_SOP = 7, LSM_DISK_TYPE_SCSI = 8, LSM_DISK_TYPE_LUN = 9, LSM_DISK_TYPE_NL_SAS = 51, LSM_DISK_TYPE_HDD = 52, LSM_DISK_TYPE_SSD = 53, LSM_DISK_TYPE_HYBRID = 54, } lsm_disk_type; #define LSM_DISK_STATUS_UNKNOWN 0x0000000000000001 #define LSM_DISK_STATUS_OK 0x0000000000000002 #define LSM_DISK_STATUS_OTHER 0x0000000000000004 #define 
LSM_DISK_STATUS_PREDICTIVE_FAILURE 0x0000000000000008 #define LSM_DISK_STATUS_ERROR 0x0000000000000010 #define LSM_DISK_STATUS_REMOVED 0x0000000000000020 #define LSM_DISK_STATUS_STARTING 0x0000000000000040 #define LSM_DISK_STATUS_STOPPING 0x0000000000000080 #define LSM_DISK_STATUS_STOPPED 0x0000000000000100 #define LSM_DISK_STATUS_INITIALIZING 0x0000000000000200 #define LSM_DISK_STATUS_MAINTENANCE_MODE 0x0000000000000400 #define LSM_DISK_STATUS_SPARE_DISK 0x0000000000000800 #define LSM_DISK_STATUS_RECONSTRUCT 0x0000000000001000 #define LSM_DISK_STATUS_FREE 0x0000000000002000 /**^ * New in version 1.2, New in version 1.2, indicate the whole disk is not * holding any data or acting as a dedicate spare disk. * This disk could be assigned as a dedicated spare disk or used for creating * pool. * If any spare disk(like those on NetApp ONTAP) does not require any explicit * action when assigning to pool, it should be treated as free disk and marked * as LSM_DISK_STATUS_FREE|LSM_DISK_STATUS_SPARE_DISK. 
* */ #define LSM_DISK_BLOCK_SIZE_NOT_FOUND -1 #define LSM_DISK_BLOCK_COUNT_NOT_FOUND -1 #define LSM_POOL_STATUS_UNKNOWN 0x0000000000000001 #define LSM_POOL_STATUS_OK 0x0000000000000002 #define LSM_POOL_STATUS_OTHER 0x0000000000000004 #define LSM_POOL_STATUS_DEGRADED 0x0000000000000010 #define LSM_POOL_STATUS_ERROR 0x0000000000000020 #define LSM_POOL_STATUS_STOPPED 0x0000000000000200 #define LSM_POOL_STATUS_RECONSTRUCTING 0x0000000000001000 #define LSM_POOL_STATUS_VERIFYING 0x0000000000002000 #define LSM_POOL_STATUS_INITIALIZING 0x0000000000004000 #define LSM_POOL_STATUS_GROWING 0x0000000000008000 #define LSM_POOL_ELEMENT_TYPE_POOL 0x0000000000000002 #define LSM_POOL_ELEMENT_TYPE_VOLUME 0x0000000000000004 #define LSM_POOL_ELEMENT_TYPE_FS 0x0000000000000008 #define LSM_POOL_ELEMENT_TYPE_DELTA 0x0000000000000010 #define LSM_POOL_ELEMENT_TYPE_VOLUME_FULL 0x0000000000000020 #define LSM_POOL_ELEMENT_TYPE_VOLUME_THIN 0x0000000000000040 #define LSM_POOL_ELEMENT_TYPE_SYS_RESERVED 0x0000000000000400 #define LSM_POOL_UNSUPPORTED_VOLUME_GROW 0x0000000000000001 #define LSM_POOL_UNSUPPORTED_VOLUME_SHRINK 0x0000000000000002 typedef enum { LSM_TARGET_PORT_TYPE_OTHER = 1, LSM_TARGET_PORT_TYPE_FC = 2, LSM_TARGET_PORT_TYPE_FCOE = 3, LSM_TARGET_PORT_TYPE_ISCSI = 4 } lsm_target_port_type; #define LSM_VOLUME_VCR_STRIP_SIZE_DEFAULT 0 /** ^ Plugin and hardware RAID will use their default strip size */ #ifdef __cplusplus } #endif #endif /* LIBSTORAGEMGMT_TYPES_H */ libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_fs.h0000664000175000017500000000523612537737032023737 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. 
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LSM_FS_H #define LSM_FS_H #include "libstoragemgmt_common.h" #ifdef __cplusplus extern "C" { #endif /** * Frees a File system record * @param fs File system to free. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_fs_record_free(lsm_fs *fs); /** * Copies a file system record. * @param source File system record to copy. * @return Pointer to copy of file system record */ lsm_fs LSM_DLL_EXPORT *lsm_fs_record_copy(lsm_fs * source); /** * Frees an array of file system records * @param fs Array of file system record pointers * @param size Number in array to free * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_fs_record_array_free(lsm_fs *fs[], uint32_t size); /** * Returns the id of the file system. * @param fs File system record pointer * @return Pointer to file system id */ const char LSM_DLL_EXPORT *lsm_fs_id_get(lsm_fs *fs); /** * Returns the name associated with the file system. * @param fs File system record pointer * @return Pointer to file system name */ const char LSM_DLL_EXPORT *lsm_fs_name_get(lsm_fs *fs); /** * Returns the file system system id. * @param fs File system record pointer * @return Pointer to the system id. */ const char LSM_DLL_EXPORT *lsm_fs_system_id_get(lsm_fs *fs); /** * Returns the pool id associated with the file system * @param fs File system record pointer * @return Pointer to pool id */ const char LSM_DLL_EXPORT *lsm_fs_pool_id_get(lsm_fs *fs); /** * Returns total space of file system. 
* @param fs File system record pointer * @return Total size of file system in bytes */ uint64_t LSM_DLL_EXPORT lsm_fs_total_space_get(lsm_fs *fs); /** * Returns the space available on the file system * @param fs File system record pointer * @return Total number of bytes that are free. */ uint64_t LSM_DLL_EXPORT lsm_fs_free_space_get(lsm_fs *fs); #ifdef __cplusplus } #endif #endif libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_accessgroups.h0000664000175000017500000000641612537737032026031 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LSM_ACCESS_GROUP_H #define LSM_ACCESS_GROUP_H #include "libstoragemgmt_types.h" #ifdef __cplusplus extern "C" { #endif /** * Frees the resources for an access group. * @param group Group to free * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_access_group_record_free(lsm_access_group * group); /** * Frees the resources for an array of access groups. * @param ag Array of access groups to free resources for * @param size Number of elements in the array. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_access_group_record_array_free(lsm_access_group *ag[], uint32_t size); /** * Copies an access group. * @param ag Access group to copy * @return NULL on error, else copied access group. 
*/ lsm_access_group LSM_DLL_EXPORT * lsm_access_group_record_copy(lsm_access_group * ag); /** * Returns a pointer to the id. * Note: Storage is allocated in the access group and will be deleted when * the access group gets freed. If you need longer lifespan copy the value. * @param group Access group to retrieve id for. * @return Null on error (not an access group), else value of group. */ const char LSM_DLL_EXPORT *lsm_access_group_id_get(lsm_access_group *group); /** * Returns a pointer to the name. * Note: Storage is allocated in the access group and will be deleted when * the access group gets freed. If you need longer lifespan copy the value. * @param group Access group to retrieve id for. * @return Null on error (not an access group), else value of name. */ const char LSM_DLL_EXPORT *lsm_access_group_name_get(lsm_access_group *group); /** * Returns a pointer to the system id. * Note: Storage is allocated in the access group and will be deleted when * the access group gets freed. If you need longer lifespan copy the value. * @param group Access group to retrieve id for. * @return Null on error (not an access group), else value of system id. */ const char LSM_DLL_EXPORT * lsm_access_group_system_id_get(lsm_access_group *group); /** * Returns a pointer to the initiator list. * Note: Storage is allocated in the access group and will be deleted when * the access group gets freed. If you need longer lifespan copy the value. * @param group Access group to retrieve id for. * @return Null on error (not an access group), else value of initiator list. */ lsm_string_list LSM_DLL_EXPORT * lsm_access_group_initiator_id_get(lsm_access_group * group); #ifdef __cplusplus } #endif #endif libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_common.h0000664000175000017500000001074712537737032024622 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. 
* This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LSM_COMMON_H #define LSM_COMMON_H #include "libstoragemgmt_types.h" #ifdef __cplusplus extern "C" { #endif #if defined _WIN32 || defined __CYGWIN__ #define LSM_DLL_IMPORT __declspec(dllimport) #define LSM_DLL_EXPORT __declspec(dllexport) #define LSM_DLL_LOCAL #else #if __GNUC__ >= 4 #define LSM_DLL_IMPORT __attribute__ ((visibility ("default"))) #define LSM_DLL_EXPORT __attribute__ ((visibility ("default"))) #define LSM_DLL_LOCAL __attribute__ ((visibility ("hidden"))) #else #define LSM_DLL_IMPORT #define LSM_DLL_EXPORT #define LSM_DLL_LOCAL #endif #endif /** * Allocates storage for string line of specified size. * @param size Initial number of strings to allocate * @return NULL on error, else valid lsm_string_list record pointer */ lsm_string_list LSM_DLL_EXPORT *lsm_string_list_alloc(uint32_t size); /** * Frees the memory allocated with the lsmStringListFree * @param sl Record to free * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_string_list_free(lsm_string_list *sl); /** * Copies a lsm_string_list record. * @param src Source to copy * @return NULL on error, else copy of source. */ lsm_string_list LSM_DLL_EXPORT *lsm_string_list_copy(lsm_string_list *src); /** * Set the specified element with the passed value. 
* @param sl Valid string list pointer * @param index Element position to set value to * @param value Value to use for assignment * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_string_list_elem_set(lsm_string_list *sl, uint32_t index, const char *value); /** * Returns the value at the specified elem index * @param sl Valid string list pointer * @param index Index to retrieve * @return Value at that index position. */ const char LSM_DLL_EXPORT *lsm_string_list_elem_get(lsm_string_list *sl, uint32_t index); /** * Returns the size of the list * @param sl Valid string list pointer * @return size of list, note you cannot create a zero sized list, so * 0 indicates error with structure * */ uint32_t LSM_DLL_EXPORT lsm_string_list_size(lsm_string_list *sl); /** * Appends a char * to the string list, will grow container as needed. * @param sl String list to append to * @param add Character string to add * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_string_list_append(lsm_string_list *sl, const char *add); /** * Deletes the string at the specified index. * NOTE: The elements after this one are moved down, thus if you wanted to * iterate over the list deleting each element one by one you need to do so in * reverse order. 
* @param sl String list to remove item from * @param index Specified index * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_string_list_delete(lsm_string_list *sl, uint32_t index); /** * Checks to see if initiator id is valid * @param init_id Initiator value * @param init_type Type of initiator id, will get modified * to determined if type passed in is UNKNOWN * @return LSM_ERR_OK if initiator id is OK, else LSM_INVALID_ARGUMENT */ int LSM_DLL_EXPORT lsm_initiator_id_verify(const char *init_id, lsm_access_group_init_type *init_type); /** * Checks to see if volume vpd83 is valid * @param vpd83 VPD string to check * @return LSM_ERR_OK if vpd is OK, else LSM_INVALID_ARGUMENT */ int LSM_DLL_EXPORT lsm_volume_vpd83_verify(const char *vpd83); #ifdef __cplusplus } #endif #endif /* LSM_COMMON_H */ libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_plug_interface.h0000664000175000017500000020212312537737032026310 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . 
* * Author: tasleson */ #ifndef LIBSTORAGEMGMT_PLUG_INTERFACE_H #define LIBSTORAGEMGMT_PLUG_INTERFACE_H #include "libstoragemgmt_common.h" #include "libstoragemgmt_accessgroups.h" #include "libstoragemgmt_blockrange.h" #include "libstoragemgmt_capabilities.h" #include "libstoragemgmt_error.h" #include "libstoragemgmt_fs.h" #include "libstoragemgmt_nfsexport.h" #include "libstoragemgmt_hash.h" #include "libstoragemgmt_pool.h" #include "libstoragemgmt_snapshot.h" #include "libstoragemgmt_systems.h" #include "libstoragemgmt_volumes.h" #include "libstoragemgmt_disk.h" #ifdef __cplusplus extern "C" { #endif /** @file libstoragemgmt_plug_interface.h */ /** \enum lsm_data_type What type of data structure we have */ typedef enum { LSM_DATA_TYPE_UNKNOWN = -1, /**< Unknown */ LSM_DATA_TYPE_NONE, /**< None */ LSM_DATA_TYPE_ACCESS_GROUP, /**< Access group */ LSM_DATA_TYPE_BLOCK_RANGE, /**< Block range */ LSM_DATA_TYPE_FS, /**< File system */ LSM_DATA_TYPE_NFS_EXPORT, /**< NFS export */ LSM_DATA_TYPE_POOL, /**< Pool */ LSM_DATA_TYPE_SS, /**< Snap shot */ LSM_DATA_TYPE_STRING_LIST, /**< String list */ LSM_DATA_TYPE_SYSTEM, /**< System */ LSM_DATA_TYPE_VOLUME, /**< Volume */ LSM_DATA_TYPE_DISK /**< Disk */ } lsm_data_type; /** * Opaque data type for plug-ins */ typedef struct _lsm_plugin lsm_plugin; /** * Typedef for pointer type */ typedef lsm_plugin *lsm_plugin_ptr; /** * Plug-in register callback function signature. 
* @param c Valid lsm plugin pointer * @param uri Connection URI * @param password Plain text password * @param timeout Plug-in timeout to array * @param flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plugin_register) (lsm_plugin_ptr c, const char *uri, const char *password, uint32_t timeout, lsm_flag flags); /** * Plug-in unregister callback function signature * @param c Valid lsm plugin pointer * @param flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plugin_unregister) (lsm_plugin_ptr c, lsm_flag flags); /** * Set plug-in time-out value callback function signature * @param c Valid lsm plug-in pointer * @param timeout timeout value in milliseconds * @param flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_tmo_set) (lsm_plugin_ptr c, uint32_t timeout, lsm_flag flags); /** * Get the plug-in time-out value callback function signature * @param[in] c Valid lsm plug-in pointer * @param[out] timeout Time-out value * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_tmo_get) (lsm_plugin_ptr c, uint32_t *timeout, lsm_flag flags); /** * Retrieve the plug-in capabilities callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] sys System to interrogate * @param[out] cap Capabilities * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_capabilities) (lsm_plugin_ptr c, lsm_system *sys, lsm_storage_capabilities **cap, lsm_flag flags); /** * Retrieve the job status callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] job Job identifier * @param[out] status Enumerated value representing status * @param[out] percent_complete How far completed * @param[out] type Type of result * @param[out] value Value of result * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_Job_status) (lsm_plugin_ptr c, const char *job, 
lsm_job_status *status, uint8_t *percent_complete, lsm_data_type *type, void **value, lsm_flag flags); /** * Instructs the plug-in to release the memory for the specified job id, * callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] job_id Job ID to free memory for * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_job_free) (lsm_plugin_ptr c, char *job_id, lsm_flag flags); /** * Retrieves a list of pools callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] search_key Search key * @param[in] search_value Search value * @param[out] pool_array List of pools * @param[out] count Number of items in array * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_pool_list) (lsm_plugin_ptr c, const char *search_key, const char *search_value, lsm_pool **pool_array[], uint32_t *count, lsm_flag flags); /** * Retrieve a list of systems, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[out] systems List of systems * @param[out] system_count Number of systems * @param[out] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_system_list) (lsm_plugin_ptr c, lsm_system **systems[], uint32_t *system_count, lsm_flag flags); /** \struct lsm_mgmt_ops_v1 * \brief Callback functions for management operations */ struct lsm_mgmt_ops_v1 { lsm_plug_tmo_set tmo_set; /**< tmo set callback */ lsm_plug_tmo_get tmo_get; /**< tmo get callback */ lsm_plug_capabilities capablities; /**< capabilities callback */ lsm_plug_Job_status job_status; /**< status of job */ lsm_plug_job_free job_free; /**< Free a job */ lsm_plug_pool_list pool_list; /**< List of pools */ lsm_plug_system_list system_list; /**< List of systems */ }; /** * Retrieve a list of volumes. 
* @param[in] c Valid lsm plug-in pointer * @param[in] search_key Search key * @param[in] search_value Search value * @param[out] vol_array Array of volumes * @param[out] count Number of volumes * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_list) (lsm_plugin_ptr c, const char *search_key, const char *search_val, lsm_volume **vol_array[], uint32_t *count, lsm_flag flags); /** * Retrieve a list of volumes. * @param[in] c Valid lsm plug-in pointer * @param[in] search_key Search key * @param[in] search_value Search value * @param[out] disk_array Array of disk pointers * @param[out] count Number of disks * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_disk_list) (lsm_plugin_ptr c, const char *search_key, const char *search_value, lsm_disk **disk_array[], uint32_t *count, lsm_flag flags); /** * Retrieve a list of target ports. * @param[in] c Valid lsm plugin-in pointer * @param[in] search_key Search key * @param[in] search_value Search value * @param[out] target_port_array Array of target port pointers * @param[out] count Number of target ports * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_target_port_list) (lsm_plugin_ptr c, const char *search_key, const char *search_value, lsm_target_port **target_port_array[], uint32_t *count, lsm_flag flags); /** * Creates a volume, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] pool Pool to allocated storage from * @param[in] volume_name Name of new volume * @param[in] size Size of volume in bytes * @param[in] provisioning How provisioned * @param[out] new_volume Information on newly created volume * @param[out] job Job ID * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_create) (lsm_plugin_ptr c, lsm_pool *pool, const char *volume_name, uint64_t size, lsm_volume_provision_type provisioning, lsm_volume 
**new_volume, char **job, lsm_flag flags); /** * Volume replicate, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] pool Pool to allocated replicant from (optional) * @param[in] rep_type Replication type * @param[in] volume_src Source of the replication * @param[in] name Name of newly replicated volume * @param[out] new_replicant Newly replicated volume * @param job * @param flags * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_replicate) (lsm_plugin_ptr c, lsm_pool *pool, lsm_replication_type rep_type, lsm_volume *volume_src, const char *name, lsm_volume **new_replicant, char **job, lsm_flag flags); /** * Return the block size of a replicated block range. * @param[in] c Valid lsm plug-in pointer * @param[in] system System to query against * @param[out] bs Block size * @param[out] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_replicate_range_block_size) (lsm_plugin_ptr c, lsm_system * system, uint32_t *bs, lsm_flag flags); /** * Replicate a range of a volume to the same volume or different volume. 
* @param[in] c Valid lsm plug-in pointer * @param[in] rep_type What type of replication * @param[in] source Source of the replication * @param[in] dest Destination of the replication, can be * same as source * @param[in] ranges An array of ranges * @param[in] num_ranges Number of items in array * @param[out] job Job ID * @param flags * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_replicate_range) (lsm_plugin_ptr c, lsm_replication_type rep_type, lsm_volume *source, lsm_volume *dest, lsm_block_range **ranges, uint32_t num_ranges, char **job, lsm_flag flags); /** * Re-size a volume, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] volume Volume to be re-sized * @param[in] new_size New size of volume in bytes * @param[in] resized_volume Information about newly re-sized volume * @param[out] job The job ID * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_resize) (lsm_plugin_ptr c, lsm_volume *volume, uint64_t new_size, lsm_volume **resized_volume, char **job, lsm_flag flags); /** * Delete a volume, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] volume Volume to be deleted * @param[out] job Job ID * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_delete) (lsm_plugin_ptr c, lsm_volume *volume, char **job, lsm_flag flags); /** * Place a volume online, callback function signature. * @param[in] c Valid lsm plug-in pointer * @param[in] v Volume to place online * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_enable) (lsm_plugin_ptr c, lsm_volume *v, lsm_flag flags); /** * Take a volume offline, callback function signature. 
* @param[in] c Valid lsm plug-in pointer * @param v * @param flags * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_disable) (lsm_plugin_ptr c, lsm_volume *v, lsm_flag flags); /** * Setup the cap authentication for the specified initiator, callback * function signature * @param[in] c Valid lsm plug-in pointer * @param[in] init_id Initiator to set chap authentication for * @param[in] in_user CHAP inbound username * @param[in] in_password CHAP inbound password * @param[in] out_user CHAP outbound user name * @param[in] out_password CHAP outbound user name * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_iscsi_chap_auth) (lsm_plugin_ptr c, const char *init_id, const char *in_user, const char *in_password, const char *out_user, const char *out_password, lsm_flag flags); /** * Retrieve a list of access groups, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] search_key Field to search on * @param[in] search_value Field value * @param[out] groups Array of groups * @param[out] group_count Number of groups * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_access_group_list) (lsm_plugin_ptr c, const char *search_key, const char *search_value, lsm_access_group **groups[], uint32_t *group_count, lsm_flag flags); /** * Creates an access group, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] name Name of access group * @param[in] initiator_id Initiator to be added to group * @param[in] id_type Initiator type * @param[in] system System to create group for * @param[out] access_group Newly created access group * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_access_group_create) (lsm_plugin_ptr c, const char *name, const char *initiator_id, lsm_access_group_init_type init_type, lsm_system *system, lsm_access_group **access_group, lsm_flag flags); /** * Deletes 
an access group, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] group Access group to be deleted * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_access_group_delete) (lsm_plugin_ptr c, lsm_access_group *group, lsm_flag flags); /** * Add an initiator to an access group, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] access_group Group to add initiator to * @param[in] initiator_id Initiator to add to group * @param[in] id_type Initiator type * @param[out] updated_access_group Updated access group * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_access_group_initiator_add) (lsm_plugin_ptr c, lsm_access_group *access_group, const char *initiator_id, lsm_access_group_init_type id_type, lsm_access_group **updated_access_group, lsm_flag flags); /** * Remove an initiator from an access group, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] access_group Group to remove initiator from * @param[in] initiator_id Initiator to remove * @param[in] id_type Initiator type * @param[out] updated_access_group Updated access group * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_access_group_initiator_delete) (lsm_plugin_ptr c, lsm_access_group *access_group, const char *initiator_id, lsm_access_group_init_type id_type, lsm_access_group **updated_access_group, lsm_flag flags); /** * Grants access to a volume for the specified access group, callback function * signature * @param[in] c Valid lsm plug-in pointer * @param[in] group Group to be granted access * @param[in] volume Volume to be given access too * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_mask) (lsm_plugin_ptr c, lsm_access_group *group, lsm_volume *volume, lsm_flag flags); /** * Revokes access to a volume for a specified access group, 
callback function * signature * @param[in] c Valid lsm plug-in pointer * @param[in] group Group to revoke access for * @param[in] volume Volume to which will no longer be * accessible by group * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_unmask) (lsm_plugin_ptr c, lsm_access_group * group, lsm_volume *volume, lsm_flag flags); /** * Retrieve an array of volumes which are accessible by access group, callback * function signature * @param[in] c Valid lsm plug-in pointer * @param[in] group Group to find volumes for * @param[out] volumes Array of volumes * @param[out] count Number of volumes * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volumes_accessible_by_access_group) (lsm_plugin_ptr c, lsm_access_group *group, lsm_volume **volumes[], uint32_t *count, lsm_flag flags); /** * Retrieve a list of access groups that have access to the specified volume, * callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] volume Volume to query * @param[out] groups Array of access groups * @param[out] group_count Number of access groups * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_access_groups_granted_to_volume) (lsm_plugin_ptr c, lsm_volume *volume, lsm_access_group **groups[], uint32_t *group_count, lsm_flag flags); /** * Determine if a volume has child dependencies, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] volume Volume to query * @param[out] yes Boolean * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_child_dependency) (lsm_plugin_ptr c, lsm_volume *volume, uint8_t *yes, lsm_flag flags); /** * Remove dependencies from a volume, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] volume Volume to remove dependency for * @param[out] job Job ID * @param[in] flags Reserved * @return 
LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_child_dependency_delete) (lsm_plugin_ptr c, lsm_volume *volume, char **job, lsm_flag flags); /** * File system list, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] search_key Search key * @param[in] search_value Search value * @param[out] fs An array of file systems * @param[out] fs_count Number of file systems * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_fs_list) (lsm_plugin_ptr c, const char *search_key, const char *search_value, lsm_fs **fs[], uint32_t *fs_count, lsm_flag flags); /** * Create a file system, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] pool Pool to create file system from * @param[in] name Name of file system * @param[in] size_bytes Size of the file system in bytes * @param[out] fs Newly created file system * @param[out] job Job ID * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_fs_create) (lsm_plugin_ptr c, lsm_pool *pool, const char *name, uint64_t size_bytes, lsm_fs **fs, char **job, lsm_flag flags); /** * Delete a file system, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] fs File system to delete * @param[out] job Job ID * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_fs_delete) (lsm_plugin_ptr c, lsm_fs *fs, char **job, lsm_flag flags); /** * Clone a file system, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] dest_fs_name Clone fs name * @param[out] cloned_fs New clone * @param[in] optional_snapshot Basis of clone * @param[out] job Job ID * @param[in] flags reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_fs_clone) (lsm_plugin_ptr c, lsm_fs *src_fs, const char *dest_fs_name, lsm_fs **cloned_fs, lsm_fs_ss *optional_snapshot, char **job, lsm_flag flags); /** * Determine if a 
file system has child dependencies, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] fs File system to check * @param[in] files Specific files to check * @param[out] yes Boolean * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_fs_child_dependency) (lsm_plugin_ptr c, lsm_fs *fs, lsm_string_list *files, uint8_t *yes); /** * Remove dependencies from a file system, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] fs File system to remove dependencies for * @param[in] files Specific files to remove dependencies for * @param[out] job Job ID * @param[out] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_fs_child_dependency_delete) (lsm_plugin_ptr c, lsm_fs *fs, lsm_string_list * files, char **job, lsm_flag flags); /** * Re-size a file system, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] fs File system to re-size * @param[in] new_size_bytes New size of file system * @param[out] rfs Re-sized file system * @param[out] job Job ID * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_fs_resize) (lsm_plugin_ptr c, lsm_fs *fs, uint64_t new_size_bytes, lsm_fs ** rfs, char **job, lsm_flag flags); /** * Clone an individual file on a file system, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] fs File system which contains the file to clone * @param[in] src_file_name Source file name and path * @param[in] dest_file_name Destination file and path * @param[in] snapshot Optional backing snapshot * @param[out] job Job ID * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_fs_file_clone) (lsm_plugin_ptr c, lsm_fs *fs, const char *src_file_name, const char *dest_file_name, lsm_fs_ss *snapshot, char **job, lsm_flag flags); /** * Retrieve a list of fs snapshots for a file system, callback function * signature * @param[in] c 
Valid lsm plug-in pointer * @param[in] fs File system * @param[out] ss Array of snap shots * @param[out] ss_count Count of snapshots * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_fs_ss_list) (lsm_plugin_ptr c, lsm_fs *fs, lsm_fs_ss **ss[], uint32_t *ss_count, lsm_flag flags); /** * Create a fs snapshot of the specified file system and optionally constrain * it to a list of files, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] fs File system to create snapshot for * @param[in] name Snap shot name * @param[out] snapshot Newly created snapshot * @param[out] job Job ID * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_fs_ss_create) (lsm_plugin_ptr c, lsm_fs *fs, const char *name, lsm_fs_ss **snapshot, char **job, lsm_flag flags); /** * Delete a fs snapshot, callback function signature, callback function * signature * @param[in] c Valid lsm plug-in pointer * @param[in] fs File system to delete snapshot for * @param[in] ss Snapshot to delete * @param[out] job Job ID * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_fs_ss_delete) (lsm_plugin_ptr c, lsm_fs *fs, lsm_fs_ss *ss, char **job, lsm_flag flags); /** * Revert the state of a file system or specific files to a previous state, * callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] fs File system of interest * @param[in] files Optional list of files * @param[in] restore_files Optional path and name of restored files * @param[in] all_files boolean to indicate all files should be * restored * @param[out] job Job ID * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_fs_ss_restore) (lsm_plugin_ptr c, lsm_fs *fs, lsm_fs_ss *ss, lsm_string_list *files, lsm_string_list *restore_files, int all_files, char **job, lsm_flag flags); /** * Get a list of NFS client authentication types, callback function signature * @param[in] c Valid lsm plug-in 
pointer * @param[out] types List of authtication types * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_nfs_auth_types) (lsm_plugin_ptr c, lsm_string_list **types, lsm_flag flags); /** * Retrieve a list of NFS exports, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] search_key Search key * @param[in] search_value Search value * @param[out] exports An array of exported file systems * @param[out] count Number of exported file systems * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_nfs_list) (lsm_plugin_ptr c, const char *search_key, const char *search_value, lsm_nfs_export **exports[], uint32_t *count, lsm_flag flags); /** * Exports a file system via NFS, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] fs_id File system id to export * @param[in] export_path NFS export path * @param[in] root_list List of servers with root access * @param[in] rw_list List of servers with read/write access * @param[in] ro_list List of servers with read only access * @param[in] anon_uid UID to be mapped to anonymous * @param[in] anon_gid GID to be mapped to anonymous * @param[in] auth_type Client authentication type * @param[in] options Options * @param[out] exported Newly created export * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_nfs_export_fs) (lsm_plugin_ptr c, const char *fs_id, const char *export_path, lsm_string_list *root_list, lsm_string_list *rw_list, lsm_string_list *ro_list, uint64_t anon_uid, uint64_t anon_gid, const char *auth_type, const char *options, lsm_nfs_export **exported, lsm_flag flags); /** * Removes a NFS export, callback function signature * @param[in] c Valid lsm plug-in pointer * @param[in] e Export to remove * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_nfs_export_remove) (lsm_plugin_ptr c, lsm_nfs_export 
*e, lsm_flag flags); /** \struct lsm_san_ops_v1 * \brief Block array oriented functions (callback functions) * NOTE: This structure cannot change as we need to maintain backwards * compatibility */ struct lsm_san_ops_v1 { lsm_plug_volume_list vol_get; /**^ retrieving volumes */ lsm_plug_disk_list disk_get; /**^ retrieve disks */ lsm_plug_volume_create vol_create; /**^ creating a lun */ lsm_plug_volume_replicate vol_replicate; /**^ replicating lun */ lsm_plug_volume_replicate_range_block_size vol_rep_range_bs; /**^ volume replication range block size */ lsm_plug_volume_replicate_range vol_rep_range; /**^ volume replication range */ lsm_plug_volume_resize vol_resize; /**^ resizing a volume */ lsm_plug_volume_delete vol_delete; /**^ deleting a volume */ lsm_plug_volume_enable vol_enable; /**^ volume is accessible */ lsm_plug_volume_disable vol_disable; /**^ volume is unaccessible */ lsm_plug_iscsi_chap_auth iscsi_chap_auth; /**^ iscsi chap authentication */ lsm_plug_access_group_list ag_list; /**^ access groups */ lsm_plug_access_group_create ag_create; /**^ access group create */ lsm_plug_access_group_delete ag_delete; /**^ access group delete */ lsm_plug_access_group_initiator_add ag_add_initiator; /**^ adding an initiator to an access group */ lsm_plug_access_group_initiator_delete ag_del_initiator; /**^ deleting an initiator from an access group */ lsm_plug_volume_mask ag_grant; /**^ acess group grant */ lsm_plug_volume_unmask ag_revoke; /**^ access group revoke */ lsm_plug_volumes_accessible_by_access_group vol_accessible_by_ag; /**^ volumes accessible by access group */ lsm_plug_access_groups_granted_to_volume ag_granted_to_vol; /**^ access groups granted to a volume */ lsm_plug_volume_child_dependency vol_child_depends; /**^ volume child dependencies */ lsm_plug_volume_child_dependency_delete vol_child_depends_rm; /**^Callback to remove volume child dependencies */ lsm_plug_target_port_list target_port_list; /**^ Callback to get list of target ports */ }; /** 
\struct lsm_fs_ops_v1 * \brief File system oriented functionality * NOTE: This structure cannot change as we need to maintain backwards * compatibility */ struct lsm_fs_ops_v1 { lsm_plug_fs_list fs_list; /**^ list file systems */ lsm_plug_fs_create fs_create; /**^ create a file system */ lsm_plug_fs_delete fs_delete; /**^ delete a file system */ lsm_plug_fs_resize fs_resize; /**^ resize a file system */ lsm_plug_fs_clone fs_clone; /**^ clone a file system */ lsm_plug_fs_file_clone fs_file_clone; /**^ clone files on a file system */ lsm_plug_fs_child_dependency fs_child_dependency; /**^ check file system child dependencies */ lsm_plug_fs_child_dependency_delete fs_child_dependency_rm; /**^ remove file system child dependencies */ lsm_plug_fs_ss_list fs_ss_list; /**^ list snapshots */ lsm_plug_fs_ss_create fs_ss_create; /**^ create a snapshot */ lsm_plug_fs_ss_delete fs_ss_delete; /**^ delete a snapshot */ lsm_plug_fs_ss_restore fs_ss_restore; /**^ restore a snapshot */ }; /** \struct lsm_nas_ops_v1 * \brief NAS system oriented functionality call back functions * NOTE: This structure cannot change as we need to maintain backwards * compatibility */ struct lsm_nas_ops_v1 { lsm_plug_nfs_auth_types nfs_auth_types; /**^ List nfs authentication types */ lsm_plug_nfs_list nfs_list; /**^ List nfs exports */ lsm_plug_nfs_export_fs nfs_export; /**^ Export a file system */ lsm_plug_nfs_export_remove nfs_export_remove; /**^ Remove a file export */ }; /** * Query the RAID information of a volume * @param[in] c Valid lsm plug-in pointer * @param[in] volume Volume to be deleted * @param[out] raid_type Enum of lsm_volume_raid_type * @param[out] strip_size Size of the strip on each disk or other * storage extent. * @param[out] disk_count Count of of disks of RAID group(s) where this * volume allocated from. * @param[out] min_io_size Minimum I/O size, also the preferred I/O size * of random I/O. 
* @param[out] opt_io_size Optimal I/O size, also the preferred I/O size * of sequential I/O. * @param[in] flags Reserved * @return LSM_ERR_OK, else error reason */ typedef int (*lsm_plug_volume_raid_info) (lsm_plugin_ptr c, lsm_volume *volume, lsm_volume_raid_type *raid_type, uint32_t *strip_size, uint32_t *disk_count, uint32_t *min_io_size, uint32_t *opt_io_size, lsm_flag flags); /** * Retrieves the membership of given pool. New in version 1.2. * @param[in] c Valid lsm plug-in pointer * @param[in] pool The lsm_pool ptr. * @param[out] raid_type * Enum of lsm_volume_raid_type. * @param[out] member_type * Enum of lsm_pool_member_type. * @param[out] member_ids * The pointer of lsm_string_list pointer. * When 'member_type' is LSM_POOL_MEMBER_TYPE_POOL, * the 'member_ids' will contain a list of parent Pool * IDs. * When 'member_type' is LSM_POOL_MEMBER_TYPE_DISK, * the 'member_ids' will contain a list of disk IDs. * When 'member_type' is LSM_POOL_MEMBER_TYPE_OTHER or * LSM_POOL_MEMBER_TYPE_UNKNOWN, the member_ids should * be NULL. * @param[in] flags Reserved, set to 0 * @return LSM_ERR_OK on success else error reason. */ typedef int (*lsm_plug_pool_member_info) (lsm_plugin_ptr c, lsm_pool *pool, lsm_volume_raid_type *raid_type, lsm_pool_member_type *member_type, lsm_string_list **member_ids, lsm_flag flags); /** * Query all supported RAID types and strip sizes which could be used * in lsm_volume_raid_create() functions. * New in version 1.2, only available for hardware RAID cards. * @param[in] c Valid lsm plug-in pointer * @param[in] system * The lsm_sys type. * @param[out] supported_raid_types * The pointer of uint32_t array. Containing * lsm_volume_raid_type values. * @param[out] supported_raid_type_count * The pointer of uint32_t. Indicate the item count of * supported_raid_types array. * @param[out] supported_strip_sizes * The pointer of uint32_t array. Containing * all supported strip sizes. * @param[out] supported_strip_size_count * The pointer of uint32_t. 
Indicate the item count of * supported_strip_sizes array. * @param[in] flags Reserved, set to 0 * @return LSM_ERR_OK on success else error reason. */ typedef int (*lsm_plug_volume_raid_create_cap_get) (lsm_plugin_ptr c, lsm_system *system, uint32_t **supported_raid_types, uint32_t *supported_raid_type_count, uint32_t **supported_strip_sizes, uint32_t *supported_strip_size_count, lsm_flag flags); /** * Create a disk RAID pool and allocate entire full space to new volume. * New in version 1.2, only available for hardware RAID cards. * @param[in] c Valid lsm plug-in pointer * @param[in] name String. Name for the new volume. It might be ignored or * altered on some hardwardware raid cards in order to fit * their limitation. * @param[in] raid_type * Enum of lsm_volume_raid_type. * @param[in] disks * An array of lsm_disk types * @param[in] disk_count * The count of lsm_disk in 'disks' argument. * @param[in] strip_size * uint32_t. The strip size in bytes. * @param[out] new_volume * Newly created volume, Pointer to the lsm_volume type * pointer. * @param[in] flags Reserved, set to 0 * @return LSM_ERR_OK on success else error reason. */ typedef int (*lsm_plug_volume_raid_create) (lsm_plugin_ptr c, const char *name, lsm_volume_raid_type raid_type, lsm_disk * disks[], uint32_t disk_count, uint32_t strip_size, lsm_volume ** new_volume, lsm_flag flags); /** \struct lsm_ops_v1_2 * \brief Functions added in version 1.2 * NOTE: This structure will change during the developement util version 1.2 * released. */ struct lsm_ops_v1_2 { lsm_plug_volume_raid_info vol_raid_info; /**^ Query volume RAID information*/ lsm_plug_pool_member_info pool_member_info; lsm_plug_volume_raid_create_cap_get vol_create_raid_cap_get; lsm_plug_volume_raid_create vol_create_raid; }; /** * Copies the memory pointed to by item with given type t. * @param t Type of item to copy * @param item Pointer to src * @return Null, else copy of item. 
*/ void LSM_DLL_EXPORT *lsm_data_type_copy(lsm_data_type t, void *item); /** * Initializes the plug-in. * @param argc Command line argument count * @param argv Command line arguments * @param reg Registration function * @param unreg Un-Registration function * @param desc Plug-in description * @param version Plug-in version * @return exit code for plug-in */ int LSM_DLL_EXPORT lsm_plugin_init_v1(int argc, char *argv[], lsm_plugin_register reg, lsm_plugin_unregister unreg, const char *desc, const char *version); /** * Used to register all the data needed for the plug-in operation. * @param plug Pointer provided by the framework * @param private_data Private data to be used for whatever the plug-in needs * @param mgm_ops Function pointers for management operations * @param san_ops Function pointers for SAN operations * @param fs_ops Function pointers for file system operations * @param nas_ops Function pointers for NAS operations * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_register_plugin_v1(lsm_plugin_ptr plug, void *private_data, struct lsm_mgmt_ops_v1 *mgm_ops, struct lsm_san_ops_v1 *san_ops, struct lsm_fs_ops_v1 *fs_ops, struct lsm_nas_ops_v1 *nas_ops); /** * Used to register version 1.2 APIs plug-in operation. * @param plug Pointer provided by the framework * @param private_data Private data to be used for whatever the plug-in * needs * @param mgm_ops Function pointers for struct lsm_mgmt_ops_v1 * @param san_ops Function pointers for struct lsm_san_ops_v1 * @param fs_ops Function pointers for struct lsm_fs_ops_v1 * @param nas_ops Function pointers for struct lsm_nas_ops_v1 * @param ops_v1_2 Function pointers for struct lsm_ops_v1_2 * @return LSM_ERR_OK on success, else error reason. 
*/ int LSM_DLL_EXPORT lsm_register_plugin_v1_2(lsm_plugin_ptr plug, void *private_data, struct lsm_mgmt_ops_v1 *mgm_ops, struct lsm_san_ops_v1 *san_ops, struct lsm_fs_ops_v1 *fs_ops, struct lsm_nas_ops_v1 *nas_ops, struct lsm_ops_v1_2 *ops_v1_2); /** * Used to retrieve private data for plug-in operation. * @param plug Opaque plug-in pointer. */ void LSM_DLL_EXPORT *lsm_private_data_get(lsm_plugin_ptr plug); /** * Logs an error with the plug-in * @param plug Plug-in pointer * @param code Error code to return * @param msg String message * @return returns code */ int LSM_DLL_EXPORT lsm_log_error_basic(lsm_plugin_ptr plug, lsm_error_number code, const char *msg); /** * Return an error with the plug-in * @param plug Opaque plug-in * @param error Error to associate. * @return LSM_ERR_OK, else error reason. */ int LSM_DLL_EXPORT lsm_plugin_error_log(lsm_plugin_ptr plug, lsm_error_ptr error); /** * Creates an error record. * @param code * @param msg * @param exception * @param debug * @param debug_data * @param debug_data_size * @return Null on error, else valid error error record. */ lsm_error_ptr LSM_DLL_EXPORT lsm_error_create(lsm_error_number code, const char *msg, const char *exception, const char *debug, const void *debug_data, uint32_t debug_data_size); /** * Plug-in macros for creating errors */ #define LSM_ERROR_CREATE_PLUGIN_MSG(code, msg) \ lsm_error_create(code, msg, NULL, NULL, NULL, 0) #define LSM_ERROR_CREATE_PLUGIN_EXCEPTION(code, msg, exception) \ lsm_error_create((code), (msg), (exception), NULL, NULL, 0) #define LSM_ERROR_CREATE_PLUGIN_DEBUG(code, msg, exception, debug, \ debug_data, debug_len) \ lsm_error_create((code), (msg), (exception), (debug), \ (debug_data), debug_len) /** * Helper function to create an array of lsm_pool * * @param size Number of elements * @return Valid pointer or NULL on error. 
*/ lsm_pool LSM_DLL_EXPORT **lsm_pool_record_array_alloc(uint32_t size); /** * Used to set the free space on a pool record * @param p Pool to modify * @param free_space New free space value */ void LSM_DLL_EXPORT lsm_pool_free_space_set(lsm_pool *p, uint64_t free_space); /** * Helper function to allocate a pool record. * @param id System unique identifier * @param name Human readable name * @param element_type A bit field which states what the pool can be used to * create * @param unsupported_actions Things you cannot do with this pool * @param total_space Total space * @param free_space Space available * @param status Pool status, bit field (See LSM_POOL_STATUS_XXXX * constants) * @param status_info Additional textual information on status * @param system_id System id * @param plugin_data Reserved for plugin writer use * @return LSM_ERR_OK on success, else error reason. */ lsm_pool LSM_DLL_EXPORT *lsm_pool_record_alloc(const char *id, const char *name, uint64_t element_type, uint64_t unsupported_actions, uint64_t total_space, uint64_t free_space, uint64_t status, const char *status_info, const char *system_id, const char *plugin_data); /** * Used to retrieve the plugin-private data for a specfic pool * @param p Pool to retrieve plugin private data for * @return NULL if donesn't exists, else data. */ const char LSM_DLL_EXPORT *lsm_pool_plugin_data_get(lsm_pool *p); /** * Allocate the storage needed for and array of Volume records. * @param size Number of elements. * @return Allocated memory or NULL on error. */ lsm_volume LSM_DLL_EXPORT **lsm_volume_record_array_alloc(uint32_t size); /** * Allocate the storage needed for tan array of disk records. * @param size Number of elements * @return Allocated memory or null on error. */ lsm_disk LSM_DLL_EXPORT **lsm_disk_record_array_alloc(uint32_t size); /** * Allocate a disk record. 
* @param id Identification * @param name Human readable name * @param disk_type Enumerated disk type * @param block_size Number of bytes per logical block * @param block_count Number of blocks for disk * @param disk_status Status * @param system_id System id this disk resides in * @return Pointer to allocated disk record or NULL on memory error. */ lsm_disk LSM_DLL_EXPORT *lsm_disk_record_alloc(const char *id, const char *name, lsm_disk_type disk_type, uint64_t block_size, uint64_t block_count, uint64_t disk_status, const char *system_id); /** * Allocated the storage needed for one volume record. * @param id ID * @param name Name * @param vpd83 SCSI vpd 83 id * @param block_size Volume block size * @param number_of_blocks Volume number of blocks * @param status Volume status * @param system_id System id * @param pool_id Pool id this volume is created from * @param plugin_data Private data for plugin use * @return Allocated memory or NULL on error. */ lsm_volume LSM_DLL_EXPORT *lsm_volume_record_alloc(const char *id, const char *name, const char *vpd83, uint64_t block_size, uint64_t number_of_blocks, uint32_t status, const char *system_id, const char *pool_id, const char *plugin_data); /** * Retrieve the private plug-in data from the volume record. * @param v Volume pointer * @return Private data, else NULL if it doesn't exist. */ const char LSM_DLL_EXPORT *lsm_volume_plugin_data_get(lsm_volume *v); /** * Allocate the storage needed for and array of System records. * @param size Number of elements. * @return Allocated memory or NULL on error. */ lsm_system LSM_DLL_EXPORT **lsm_system_record_array_alloc(uint32_t size); /** * Allocates the storage for one system record. * @param[in] id Id * @param[in] name System name (human readable) * @param[in] status Status of the system * @param[in] status_info Additional text for status * @param[in] plugin_data Private plugin data * @return Allocated memory or NULL on error. 
*/ lsm_system LSM_DLL_EXPORT *lsm_system_record_alloc(const char *id, const char *name, uint32_t status, const char *status_info, const char *plugin_data); /** * Retrieve plugin private data * @param s System * @return Optional data, NULL if none exist */ const char LSM_DLL_EXPORT *lsm_system_plugin_data_get(lsm_system *s); /** * Allocates storage for Access_group array * @param size Number of elements to store. * @return NULL on error, else pointer to array for use. */ lsm_access_group LSM_DLL_EXPORT ** lsm_access_group_record_array_alloc(uint32_t size); /** * Allocates storage for single Access_group * @param id ID of access group * @param name Name of access group * @param initiators List of initiators, can be NULL * @param init_type Initiator group type * @param system_id System id * @param plugin_data Reserved for plug-in use only * @return NULL on error, else valid lsm_access_group pointer. */ lsm_access_group LSM_DLL_EXPORT * lsm_access_group_record_alloc(const char *id, const char *name, lsm_string_list *initiators, lsm_access_group_init_type init_type, const char *system_id, const char *plugin_data); /** * Use to change the list of initiators associated with an access group. * @param group Access group to change initiators for * @param il String list of initiators. */ void LSM_DLL_EXPORT lsm_access_group_initiator_id_set(lsm_access_group *group, lsm_string_list *il); /** * Allocates memory for a file system record * @param id ID of file system * @param name Name of file system * @param total_space Total space * @param free_space Free space * @param pool_id Pool id * @param system_id System id * @param plugin_data Reserved for plug-in use only * @return lsm_fs, NULL on error */ lsm_fs LSM_DLL_EXPORT *lsm_fs_record_alloc(const char *id, const char *name, uint64_t total_space, uint64_t free_space, const char *pool_id, const char *system_id, const char *plugin_data); /** * Allocates the memory for the array of file system records. 
* @param size Number of elements * @return Allocated memory, NULL on error */ lsm_fs LSM_DLL_EXPORT **lsm_fs_record_array_alloc(uint32_t size); /** * Used to retrieve the plug-in private data for a specific pool * @param fs FS to retrieve plug-in private data for * @return NULL if doesn't exist, else data. */ const char LSM_DLL_EXPORT *lsm_fs_plugin_data_get(lsm_fs *fs); /** * Allocates the memory for single snap shot record. * @param id ID * @param name Name * @param ts Epoch time stamp when snapshot was created * @param plugin_data Private plugin data * @return Allocated memory, NULL on error */ lsm_fs_ss LSM_DLL_EXPORT *lsm_fs_ss_record_alloc(const char *id, const char *name, uint64_t ts, const char *plugin_data); /** * Allocates the memory for an array of snapshot records. * @param size Number of elements * @return Allocated memory, NULL on error */ lsm_fs_ss LSM_DLL_EXPORT **lsm_fs_ss_record_array_alloc(uint32_t size); /** * Retrieve private data from fs_ss. * @param fs_ss Valid fs_ss record * @return Private data, else NULL */ const char LSM_DLL_EXPORT *lsm_fs_ss_plugin_data_get(lsm_fs_ss * fs_ss); /** * Set a capability * @param cap Valid capability pointer * @param t Which capability to set * @param v Value of the capability * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_capability_set(lsm_storage_capabilities * cap, lsm_capability_type t, lsm_capability_value_type v); /** * Sets 1 or more capabilities with the same value v * @param cap Valid capability pointer * @param v The value to set capabilities to * @param ... Which capabilites to set (Make sure to terminate list * with a -1) * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_capability_set_n(lsm_storage_capabilities * cap, lsm_capability_value_type v, ...); /** * Allocated storage for capabilities * @param value Set to NULL, used during serialization otherwise. * @return Allocated record, or NULL on memory allocation failure. 
*/ lsm_storage_capabilities LSM_DLL_EXPORT * lsm_capability_record_alloc(char const *value); /** * Convenience function for plug-in writer. * Note: Make sure to free returned items to prevent memory leaks. * @param[in] uri URI to parse * @param[out] scheme returned scheme * @param[out] user returned user * @param[out] server returned server * @param[out] port returned port * @param[out] path returned path * @param[out] query_params returned query params * @return LSM_ERR_OK on successful parse, else error reason. */ int LSM_DLL_EXPORT lsm_uri_parse(const char *uri, char **scheme, char **user, char **server, int *port, char **path, lsm_hash **query_params); /** * Provides for volume filtering when an array doesn't support this natively. * Note: Filters in place removing and freeing those that don't match. * @param search_key Search field * @param search_value Search value * @param[in,out] vols Array to filter * @param[in,out] count Number of volumes to filter, number remain */ void LSM_DLL_EXPORT lsm_plug_volume_search_filter(const char *search_key, const char *search_value, lsm_volume *vols[], uint32_t *count); /** * Provides for pool filtering when an array doesn't support this natively. * Note: Filters in place removing and freeing those that don't match. * @param search_key Search field * @param search_value Search value * @param[in,out] pools Array to filter * @param[in,out] count Number of pools to filter, number remain */ void LSM_DLL_EXPORT lsm_plug_pool_search_filter(const char *search_key, const char *search_value, lsm_pool *pools[], uint32_t *count); /** * Provides for disk filtering when an array doesn't support this natively. * Note: Filters in place removing and freeing those that don't match. 
* @param search_key Search field * @param search_value Search value * @param[in,out] disks Array to filter * @param[in,out] count Number of disks to filter, number remain */ void LSM_DLL_EXPORT lsm_plug_disk_search_filter(const char *search_key, const char *search_value, lsm_disk *disks[], uint32_t *count); /** * Provides for access group filtering when an array doesn't support this * natively. * Note: Filters in place removing and freeing those that don't match. * @param search_key Search field * @param search_value Search value * @param[in,out] ag Array to filter * @param[in,out] count Number of access groups to filter, number remain */ void LSM_DLL_EXPORT lsm_plug_access_group_search_filter(const char *search_key, const char *search_value, lsm_access_group *ag[], uint32_t *count); /** * Provides for fs filtering when an array doesn't support this natively. * Note: Filters in place removing and freeing those that don't match. * @param search_key Search field * @param search_value Search value * @param[in,out] fs Array to filter * @param[in,out] count Number of file systems to filter, number remain */ void LSM_DLL_EXPORT lsm_plug_fs_search_filter(const char *search_key, const char *search_value, lsm_fs *fs[], uint32_t *count); /** * Provides for nfs filtering when an array doesn't support this natively. * Note: Filters in place removing and freeing those that don't match. * @param search_key Search field * @param search_value Search value * @param[in,out] exports Array to filter * @param[in,out] count Number of nfs exports to filter, number remain */ void LSM_DLL_EXPORT lsm_plug_nfs_export_search_filter(const char *search_key, const char *search_value, lsm_nfs_export *exports[], uint32_t *count); /** * Retrieve private data from nfs export record. 
* @param exp Valid nfs export record * @return Private data, else NULL */ const char LSM_DLL_EXPORT * lsm_nfs_export_plugin_data_get(lsm_nfs_export *exp); /** * Allocate a target port * @param id ID of target port * @param port_type Port type * @param service_address Service address * @param network_address Network Address * @param physical_address Physical address * @param physical_name Physical name * @param system_id System ID * @param plugin_data Plug-in data * @return valid lsm_target_port, else NULL on memory allocation failure */ lsm_target_port LSM_DLL_EXPORT * lsm_target_port_record_alloc(const char *id, lsm_target_port_type port_type, const char *service_address, const char *network_address, const char *physical_address, const char *physical_name, const char *system_id, const char *plugin_data); /** * Retrieve the plug-in private data pointer * @param tp Valid target port pointer * @return Character pointer to string, NULL on error */ const char LSM_DLL_EXPORT * lsm_target_port_plugin_data_get(lsm_target_port *tp); /** * Allocated an array of target pointers * @param size Number of pointers to store * @return Allocated memory, NULL on allocation errors */ lsm_target_port LSM_DLL_EXPORT ** lsm_target_port_record_array_alloc(uint32_t size); /** * Provides for target port filtering when an array doesn't support this * natively. * Note: Filters in place removing and freeing those that don't match. 
* @param search_key Search field * @param search_value Search value * @param[in,out] tp Array to filter * @param[in,out] count Number of target ports to filter, number remain */ void LSM_DLL_EXPORT lsm_plug_target_port_search_filter(const char *search_key, const char *search_value, lsm_target_port *tp[], uint32_t *count); #ifdef __cplusplus } #endif #endif /* LIBSTORAGEMGMT_PLUG_INTERFACE_H */ libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_capabilities.h0000664000175000017500000001605412537737032025760 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . 
* * Author: tasleson */ #ifndef LSM_CAPABILITIES_H #define LSM_CAPABILITIES_H #include "libstoragemgmt_common.h" #ifdef __cplusplus extern "C" { #endif /** @file libstoragemgmt_capabilities.h*/ /*Note: Domain is 0..255 */ /** \enum lsm_capability_value_type Possible values for supported feature*/ typedef enum { LSM_CAP_UNSUPPORTED = 0, /**< Feature is not supported */ LSM_CAP_SUPPORTED = 1 /**< Feature is supported */ } lsm_capability_value_type; /** \enum lsm_capability_value_type Capabilities supported by array */ typedef enum { LSM_CAP_VOLUMES = 20, /**^ List volumes */ LSM_CAP_VOLUME_CREATE = 21, /**^ Create volumes */ LSM_CAP_VOLUME_RESIZE = 22, /**^ Resize volumes */ LSM_CAP_VOLUME_REPLICATE = 23, /**^ Replication is supported */ LSM_CAP_VOLUME_REPLICATE_CLONE = 24, /**^ Can make a space efficient copy of volume */ LSM_CAP_VOLUME_REPLICATE_COPY = 25, /**^ Can make a bitwise copy of volume */ LSM_CAP_VOLUME_REPLICATE_MIRROR_ASYNC = 26, /**^ Mirror data with delay */ LSM_CAP_VOLUME_REPLICATE_MIRROR_SYNC = 27, /**^ Mirror data and always in sync */ LSM_CAP_VOLUME_COPY_RANGE_BLOCK_SIZE = 28, /**^ Size of a block for range operations */ LSM_CAP_VOLUME_COPY_RANGE = 29, /**^ Sub volume replication support */ LSM_CAP_VOLUME_COPY_RANGE_CLONE = 30, /**^ Can space efficient copy a region(s) of a volume*/ LSM_CAP_VOLUME_COPY_RANGE_COPY = 31, /**^ Can copy a region(s) of a volume */ LSM_CAP_VOLUME_DELETE = 33, /**^ Can delete a volume */ LSM_CAP_VOLUME_ENABLE = 34, /**^ Enable volume*/ LSM_CAP_VOLUME_DISABLE = 35, /**^ Disable volume*/ LSM_CAP_VOLUME_MASK = 36, /**^ Grant an access group to a volume */ LSM_CAP_VOLUME_UNMASK = 37, /**^ Revoke access for an access group */ LSM_CAP_ACCESS_GROUPS = 38, /**^ List access groups */ LSM_CAP_ACCESS_GROUP_CREATE_WWPN = 39, /**^ Create an access group */ LSM_CAP_ACCESS_GROUP_DELETE = 40, /**^ Delete an access group */ LSM_CAP_ACCESS_GROUP_INITIATOR_ADD_WWPN = 41, /**^ Add an initiator to an access group */ 
LSM_CAP_ACCESS_GROUP_INITIATOR_DELETE = 42, /**^ Remove an initiator from an access group */ LSM_CAP_VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP = 43, /**^ Retrieve a list of volumes accessible by an access group */ LSM_CAP_ACCESS_GROUPS_GRANTED_TO_VOLUME = 44, /**^ Retrieve a list of what access groups are accessible for a given * volume */ LSM_CAP_VOLUME_CHILD_DEPENDENCY = 45, /**^ Used to determine if a volume has any dependencies */ LSM_CAP_VOLUME_CHILD_DEPENDENCY_RM = 46, /**^ Removes dependendies */ LSM_CAP_ACCESS_GROUP_CREATE_ISCSI_IQN = 47, /**^ Create iSCSI access group */ LSM_CAP_ACCESS_GROUP_INITIATOR_ADD_ISCSI_IQN = 48, /**^ For empty access group, this indicates it can add iSCSI IQN to it */ LSM_CAP_VOLUME_ISCSI_CHAP_AUTHENTICATION = 53, /**^ If you can configure iSCSI chap authentication */ LSM_CAP_VOLUME_RAID_INFO = 54, /** ^ If you can query RAID information from volume */ LSM_CAP_VOLUME_THIN = 55, /**^ Thin provisioned volumes are supported */ LSM_CAP_FS = 100, /**^ List file systems */ LSM_CAP_FS_DELETE = 101, /**^ Delete a file system */ LSM_CAP_FS_RESIZE = 102, /**^ Resize a file system */ LSM_CAP_FS_CREATE = 103, /**^ Create a file system */ LSM_CAP_FS_CLONE = 104, /**^ Clone a file system */ LSM_CAP_FILE_CLONE = 105, /**^ Clone a file on a file system */ LSM_CAP_FS_SNAPSHOTS = 106, /**^ List FS snapshots */ LSM_CAP_FS_SNAPSHOT_CREATE = 107, /**^ Create a snapshot */ LSM_CAP_FS_SNAPSHOT_DELETE = 109, /**^ Delete a snapshot */ LSM_CAP_FS_SNAPSHOT_RESTORE = 110, /**^ Revert the state of a FS to the specified snapshot */ LSM_CAP_FS_SNAPSHOT_RESTORE_SPECIFIC_FILES = 111, /**^ Revert the state of a list of files to a specified snapshot */ LSM_CAP_FS_CHILD_DEPENDENCY = 112, /**^ Determine if a child dependency exists for the specified file */ LSM_CAP_FS_CHILD_DEPENDENCY_RM = 113, /**^ Remove any dependencies the file system may have */ LSM_CAP_FS_CHILD_DEPENDENCY_RM_SPECIFIC_FILES = 114, /**^ Remove any dependencies for specific files */ LSM_CAP_EXPORT_AUTH = 
120, /**^ Get a list of supported client authentication types */ LSM_CAP_EXPORTS = 121, /**^ List exported file systems */ LSM_CAP_EXPORT_FS = 122, /**^ Export a file system */ LSM_CAP_EXPORT_REMOVE = 123, /**^ Remove an export */ LSM_CAP_EXPORT_CUSTOM_PATH = 124, /**^ Plug-in allows user to define custome export path */ LSM_CAP_POOLS_QUICK_SEARCH = 210, /**^ Seach occurs on array */ LSM_CAP_VOLUMES_QUICK_SEARCH = 211, /**^ Seach occurs on array */ LSM_CAP_DISKS_QUICK_SEARCH = 212, /**^ Seach occurs on array */ LSM_CAP_ACCESS_GROUPS_QUICK_SEARCH = 213, /**^ Seach occurs on array */ LSM_CAP_FS_QUICK_SEARCH = 214, /**^ Seach occurs on array */ LSM_CAP_NFS_EXPORTS_QUICK_SEARCH = 215, /**^ Seach occurs on array */ LSM_CAP_TARGET_PORTS = 216, /**^ List target ports */ LSM_CAP_TARGET_PORTS_QUICK_SEARCH = 217, /**^ Filtering occurs on array */ LSM_CAP_DISKS = 220, /**^ List disk drives */ LSM_CAP_POOL_MEMBER_INFO = 221, /**^ Query pool member information */ LSM_CAP_VOLUME_RAID_CREATE = 222, /**^ Create RAID volume */ } lsm_capability_type; /** * Free the memory used by the storage capabilities data structure * @param cap Valid storage capability data structure. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_capability_record_free(lsm_storage_capabilities *cap); /** * Return the capability for the specified feature. * @param cap Valid pointer to capability data structure * @param t Which capability you are interested in * @return Value of supported enumerated type. 
*/ lsm_capability_value_type LSM_DLL_EXPORT lsm_capability_get(lsm_storage_capabilities * cap, lsm_capability_type t); /** * Boolean version of capability support * @param cap * @param t * @return Non-zero if supported, 0 if not supported */ int LSM_DLL_EXPORT lsm_capability_supported(lsm_storage_capabilities *cap, lsm_capability_type t); #ifdef __cplusplus } #endif #endif libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/Makefile.am0000664000175000017500000000166112537546123022107 00000000000000## Process this file with automake to produce Makefile.in ## Copyright (C) 2005-2011 Red Hat, Inc. ## See COPYING.LIB for the License of this software lsmincdir = $(includedir)/libstoragemgmt lsminc_HEADERS = \ libstoragemgmt.h \ libstoragemgmt_accessgroups.h \ libstoragemgmt_blockrange.h \ libstoragemgmt_capabilities.h \ libstoragemgmt_common.h \ libstoragemgmt_disk.h \ libstoragemgmt_error.h \ libstoragemgmt_fs.h \ libstoragemgmt_nfsexport.h \ libstoragemgmt_hash.h \ libstoragemgmt_plug_interface.h \ libstoragemgmt_pool.h \ libstoragemgmt_snapshot.h \ libstoragemgmt_systems.h \ libstoragemgmt_targetport.h \ libstoragemgmt_types.h \ libstoragemgmt_version.h \ libstoragemgmt_volumes.h install-exec-hook: $(mkinstalldirs) $(DESTDIR)$(lsmincdir) libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_blockrange.h0000664000175000017500000000577612537737032025447 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LSM_BLOCKRANGE_H #define LSM_BLOCKRANGE_H #include "libstoragemgmt_common.h" #ifdef __cplusplus extern "C" { #endif /** * Creates memory for opaque data type to store a block range * @param source_start Source block number to replicate from * @param dest_start Dest block number to replicate to * @param block_count Number of blocks to replicate * @return Valid block range ptr, otherwise NULL */ lsm_block_range LSM_DLL_EXPORT * lsm_block_range_record_alloc(uint64_t source_start, uint64_t dest_start, uint64_t block_count); /** * Frees a block range record. * @param br Block range to free * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_block_range_record_free(lsm_block_range * br); /** * Copies a block range. * @param source Source of the copy * @return copy of source */ lsm_block_range LSM_DLL_EXPORT * lsm_block_range_record_copy(lsm_block_range * source); /** * Allocates storage for an array of block ranges. * @param size Number of elements to store. * @return Pointer to memory for array of block ranges. */ lsm_block_range LSM_DLL_EXPORT ** lsm_block_range_record_array_alloc(uint32_t size); /** * Frees the memory for the array and all records contained in it. * @param br Array of block ranges to free * @param size Number of elements in array. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_block_range_record_array_free(lsm_block_range *br[], uint32_t size); /** * Retrieves the source block address. * @param br Valid block range pointer * @return value of source start. */ uint64_t LSM_DLL_EXPORT lsm_block_range_source_start_get(lsm_block_range *br); /** * Retrieves the dest block address. * @param br Valid block range pointer * @return value of dest start. 
*/ uint64_t LSM_DLL_EXPORT lsm_block_range_dest_start_get(lsm_block_range *br); /** * Retrieves the number of blocks to replicate. * @param br Valid block range pointer * @return value of number of blocks */ uint64_t LSM_DLL_EXPORT lsm_block_range_block_count_get(lsm_block_range *br); #ifdef __cplusplus } #endif #endif libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_version.h0000664000175000017500000000162112542455451025004 00000000000000/* * Copyright (C) 2011-2013 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LSM_VERSION_H_ #define LSM_VERSION_H_ #define LSM_MAJOR 1 #define LSM_MINOR 2 #define LSM_MICRO 3 #define LSM_VERSION ((LSM_MAJOR * 10000) + (LSM_MINOR * 100) + LSM_MICRO) #endif libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_hash.h0000664000175000017500000000507312537737032024251 00000000000000/* * Copyright (C) 2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson * */ #ifndef LIBSTORAGEMGMT_HASH_H #define LIBSTORAGEMGMT_HASH_H #include "libstoragemgmt_common.h" #ifdef __cplusplus extern "C" { #endif /** * Simple hash table which only stores character strings. */ /** * Allocate storage for hash. * @return Allocated record or NULL on memory allocation failure */ lsm_hash LSM_DLL_EXPORT *lsm_hash_alloc(void); /** * Free a lsm hash * @param op Record to free. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_hash_free(lsm_hash *op); /** * Get the list of 'keys' available in the hash * @param [in] op Valid optional data pointer * @param [out] l String list pointer * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_hash_keys(lsm_hash *op, lsm_string_list **l); /** * Get the value of a key (string) * @param [in] op Valid optional data pointer * @param [in] key Key to retrieve value for * @return Pointer to value, pointer valid until optional data memory * gets released. */ const char LSM_DLL_EXPORT *lsm_hash_string_get(lsm_hash *op, const char *key); /** * Set the value of a key. * Note: If key exists, it is replaced with new one. 
* @param [in] op Valid optional data pointer * @param [in] key Key to set value for (key is duped) * @param [in] value Value of new key (string is duped) * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_hash_string_set(lsm_hash *op, const char *key, const char *value); /** * Does a copy of an lsm_hash * @param src lsm_hash to copy * @return NULL on error/memory allocation failure, else copy */ lsm_hash LSM_DLL_EXPORT *lsm_hash_copy(lsm_hash *src); #ifdef __cplusplus } #endif #endif /* LIBSTORAGEMGMT_HASH_H */ libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/Makefile.in0000664000175000017500000004425612542455445022131 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = c_binding/include/libstoragemgmt DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(srcdir)/libstoragemgmt_version.h.in $(lsminc_HEADERS) ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 
$(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = libstoragemgmt_version.h CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(lsmincdir)" HEADERS = $(lsminc_HEADERS) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = 
@LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor 
= @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ lsmincdir = $(includedir)/libstoragemgmt lsminc_HEADERS = \ libstoragemgmt.h \ libstoragemgmt_accessgroups.h \ libstoragemgmt_blockrange.h \ libstoragemgmt_capabilities.h \ libstoragemgmt_common.h \ libstoragemgmt_disk.h \ libstoragemgmt_error.h \ libstoragemgmt_fs.h \ libstoragemgmt_nfsexport.h \ libstoragemgmt_hash.h \ libstoragemgmt_plug_interface.h \ libstoragemgmt_pool.h \ libstoragemgmt_snapshot.h \ libstoragemgmt_systems.h \ libstoragemgmt_targetport.h \ libstoragemgmt_types.h \ libstoragemgmt_version.h \ libstoragemgmt_volumes.h all: all-am .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu c_binding/include/libstoragemgmt/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) 
--gnu c_binding/include/libstoragemgmt/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): libstoragemgmt_version.h: $(top_builddir)/config.status $(srcdir)/libstoragemgmt_version.h.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-lsmincHEADERS: $(lsminc_HEADERS) @$(NORMAL_INSTALL) @list='$(lsminc_HEADERS)'; test -n "$(lsmincdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(lsmincdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(lsmincdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(lsmincdir)'"; \ $(INSTALL_HEADER) $$files "$(DESTDIR)$(lsmincdir)" || exit $$?; \ done uninstall-lsmincHEADERS: @$(NORMAL_UNINSTALL) @list='$(lsminc_HEADERS)'; test -n "$(lsmincdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(lsmincdir)'; $(am__uninstall_files_from_dir) ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ 
$(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(HEADERS) installdirs: for dir in "$(DESTDIR)$(lsmincdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-lsmincHEADERS install-dvi: install-dvi-am install-dvi-am: install-exec-am: @$(NORMAL_INSTALL) $(MAKE) $(AM_MAKEFLAGS) install-exec-hook install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-lsmincHEADERS .MAKE: install-am install-exec-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ clean-libtool cscopelist-am ctags ctags-am distclean \ distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-exec-hook install-html \ install-html-am install-info install-info-am \ install-lsmincHEADERS install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ uninstall-am uninstall-lsmincHEADERS install-exec-hook: $(mkinstalldirs) $(DESTDIR)$(lsmincdir) # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_systems.h0000664000175000017500000000500412537737032025027 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LIBSTORAGEMGMT_SYSTEMS_H #define LIBSTORAGEMGMT_SYSTEMS_H #include "libstoragemgmt_common.h" #ifdef __cplusplus extern "C" { #endif /** * Duplicated a system record. * NOTE: Make sure to free resources with a call to lsm_system_record_free * @param s Record to duplicate * @return NULL on memory allocation failure, else duplicated record. */ lsm_system LSM_DLL_EXPORT *lsm_system_record_copy(lsm_system *s); /** * Frees the resources for a lsm_system * @param s Record to release * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_system_record_free(lsm_system *s); /** * Frees the resources for an array for lsm_system * @param s Array to release memory for * @param size Number of elements. * @return LSM_ERR_OK on success, else error reason. * */ int LSM_DLL_EXPORT lsm_system_record_array_free(lsm_system *s[], uint32_t size); /** * Retrieve the Id for the system. * Note: Address returned is valid until lsm_system gets freed, copy return * value if you need longer scope. Do not free returned string. * @param s System to retrieve id for. * @return NULL on error, else value. 
*/ const char LSM_DLL_EXPORT *lsm_system_id_get(lsm_system *s); /** * Retrieve the Id for the system. * Note: Address returned is valid until lsm_system gets freed, copy return * value if you need longer scope. Do not free returned string. * @param s System to retrieve id for. * @return NULL on error, else value. */ const char LSM_DLL_EXPORT *lsm_system_name_get(lsm_system *s); /** * Retrieve the status for the system. * @param s System to retrieve status for * @return System status which is a bit sensitive field, returns UINT32_MAX on * bad system pointer. */ uint32_t LSM_DLL_EXPORT lsm_system_status_get(lsm_system *s); #ifdef __cplusplus } #endif #endif libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt.h0000664000175000017500000013310312537737032023242 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . 
* * Author: tasleson */ #ifndef LIBSTORAGEMGMT_H #define LIBSTORAGEMGMT_H #include "libstoragemgmt_types.h" #include "libstoragemgmt_common.h" #include "libstoragemgmt_accessgroups.h" #include "libstoragemgmt_blockrange.h" #include "libstoragemgmt_capabilities.h" #include "libstoragemgmt_disk.h" #include "libstoragemgmt_error.h" #include "libstoragemgmt_fs.h" #include "libstoragemgmt_nfsexport.h" #include "libstoragemgmt_pool.h" #include "libstoragemgmt_snapshot.h" #include "libstoragemgmt_systems.h" #include "libstoragemgmt_targetport.h" #include "libstoragemgmt_volumes.h" /*! \mainpage libStorageMgmt * * \section Introduction * * The libStorageMgmt package is a storage array independent Application * Programming Interface (API). It provides a stable and consistent API that * allows developers the ability to programmatically manage different storage * arrays and leverage the hardware accelerated features that they provide. * * \section additional Additional documentation * * Full documentation can be found at: * http://libstorage.github.io/libstoragemgmt-doc/ * */ #ifdef __cplusplus extern "C" { #endif /** * Get a connection to a storage provider. * @param[in] uri Uniform Resource Identifier (see URI documentation) * @param[in] password Password for the storage array (optional, can be NULL) * @param[out] conn The connection to use for all the other library calls * @param[in] timeout Time-out in milliseconds, (initial value). * @param[out] e Error data if connection failed. * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error * code @see lsm_error_number */ int LSM_DLL_EXPORT lsm_connect_password(const char *uri, const char *password, lsm_connect **conn, uint32_t timeout, lsm_error_ptr *e, lsm_flag flags); /** * Closes a connection to a storage provider. * @param[in] conn Valid connection to close * @param[in] flags Reserved for future use, must be zero. 
* @return LSM_ERR_OK on success, else error * code @see lsm_error_number */ int LSM_DLL_EXPORT lsm_connect_close(lsm_connect *conn, lsm_flag flags); /** * Retrieve information about the plug-in * NOTE: Caller needs to free desc and version! * @param[in] conn Valid connection @see lsm_connect_password * @param[out] desc Plug-in description * @param[out] version Plug-in version * @param [in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error * code @see lsm_error_number */ int LSM_DLL_EXPORT lsm_plugin_info_get(lsm_connect *conn, char **desc, char **version, lsm_flag flags); /** * Retrieve a list of available plug-ins. * @param[in] sep Return data separator * @param[out] plugins String list of plug-ins with the form * descversion * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error * code @see lsm_error_number */ int LSM_DLL_EXPORT lsm_available_plugins_list(const char *sep, lsm_string_list ** plugins, lsm_flag flags); /** * Sets the time-out for this connection. * @param[in] conn Valid connection @see lsm_connect_password * @param[in] timeout Time-out (in ms) * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_connect_timeout_set(lsm_connect *conn, uint32_t timeout, lsm_flag flags); /** * Gets the time-out for this connection. * @param[in] conn Valid connection @see lsm_connect_password * @param[out] timeout Time-out (in ms) * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_connect_timeout_get(lsm_connect *conn, uint32_t *timeout, lsm_flag flags); /** * Check on the status of a job, no data to return on completion. * @param[in] conn Valid connection * @param[in] job_id Job id * @param[out] status Job Status * @param[out] percent_complete Percent job complete * @param[in] flags Reserved for future use, must be zero. 
* @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_job_status_get(lsm_connect *conn, const char *job_id, lsm_job_status *status, uint8_t *percent_complete, lsm_flag flags); /** * Check on the status of a job and return the pool information when * complete * @param[in] conn Valid connection pointer * @param[in] job_id Job to check status on * @param[out] status What is the job status * @param[out] percent_complete Domain 0..100 * @param[out] pool lsm_pool for completed operation * @param[in] flags Reserved for future use, must be zero * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_job_status_pool_get(lsm_connect *conn, const char *job_id, lsm_job_status *status, uint8_t *percent_complete, lsm_pool **pool, lsm_flag flags); /** * Check on the status of a job and returns the volume information when * complete. * @param[in] conn Valid connection pointer. * @param[in] job_id Job to check status on * @param[out] status What is the job status * @param[out] percent_complete Domain 0..100 * @param[out] vol lsm_volume for completed operation. * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_job_status_volume_get(lsm_connect *conn, const char *job_id, lsm_job_status *status, uint8_t *percent_complete, lsm_volume **vol, lsm_flag flags); /** * Check on the status of a job and return the fs information when complete. * @param[in] conn Valid connection pointer * @param[in] job_id Job to check * @param[out] status What is the job status * @param[out] percent_complete Percent of job complete * @param[out] fs lsm_fs * for the completed operation * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason. 
*/ int LSM_DLL_EXPORT lsm_job_status_fs_get(lsm_connect *conn, const char *job_id, lsm_job_status *status, uint8_t *percent_complete, lsm_fs **fs, lsm_flag flags); /** * Check on the status of a job and return the snapshot information when * compete. * @param[in] c Valid connection pointer * @param[in] job Job id to check * @param[out] status Job status * @param[out] percent_complete Percent complete * @param[out] ss Snap shot information * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_job_status_ss_get(lsm_connect *c, const char *job, lsm_job_status * status, uint8_t * percent_complete, lsm_fs_ss ** ss, lsm_flag flags); /** * Frees the resources used by a job. * @param[in] conn Valid connection pointer * @param[in] job_id Job ID * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK, else error reason. */ int LSM_DLL_EXPORT lsm_job_free(lsm_connect *conn, char **job_id, lsm_flag flags); /** * Storage system query functions */ /** * Query the capabilities of the storage array. * @param[in] conn Valid connection @see lsm_connect_password * @param[in] system System of interest * @param[out] cap The storage array capabilities * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success else error reason */ int LSM_DLL_EXPORT lsm_capabilities(lsm_connect *conn, lsm_system *system, lsm_storage_capabilities **cap, lsm_flag flags); /** * Query the list of storage pools on the array. 
* @param[in] conn Valid connection @see lsm_connect_password * @param[in] search_key Search key (NULL for all) * @param[in] search_value Search value * @param[out] pool_array Array of storage pools * @param[out] count Number of storage pools * @param[in] flags Reserved, set to 0 * @return LSM_ERR_OK on success else error reason */ int LSM_DLL_EXPORT lsm_pool_list(lsm_connect *conn, char *search_key, char *search_value, lsm_pool **pool_array[], uint32_t *count, lsm_flag flags); /** * Volume management functions */ /** * Gets a list of logical units for this array. * @param[in] conn Valid connection @see lsm_connect_password * @param[in] search_key Search key (NULL for all) * @param[in] search_value Search value * @param[out] volumes An array of lsm_volume * @param[out] count Number of elements in the lsm_volume array * @param[in] flags Reserved set to 0 * @return LSM_ERR_OK on success else error reason */ int LSM_DLL_EXPORT lsm_volume_list(lsm_connect *conn, const char *search_key, const char *search_value, lsm_volume ** volumes[], uint32_t *count, lsm_flag flags); /** * Get a list of disk for this array. * @param [in] conn Valid connection @see * lsm_connect_password * @param[in] search_key Search key (NULL for all) * @param[in] search_value Search value * @param [out] disks An array of lsm_disk types * @param [out] count Number of disks * @param [in] flags Reserved set to zero * @return LSM_ERR_OK on success else error reason */ int LSM_DLL_EXPORT lsm_disk_list(lsm_connect *conn, const char *search_key, const char *search_value, lsm_disk **disks[], uint32_t *count, lsm_flag flags); /** * Creates a new volume (aka. LUN). 
* @param[in] conn Valid connection @see lsm_connect_password * @param[in] pool Valid pool @see lsm_pool (OPTIONAL, use NULL * for plug-in choice) * @param[in] volume_name Human recognizable name (not all arrays * support) * @param[in] size Size of new volume in bytes (actual size will * be based on array rounding to block size) * @param[in] provisioning Type of volume provisioning to use * @param[out] new_volume Valid volume @see lsm_volume * @param[out] job Indicates job id * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async. , else error * code */ int LSM_DLL_EXPORT lsm_volume_create(lsm_connect *conn, lsm_pool * pool, const char *volume_name, uint64_t size, lsm_volume_provision_type provisioning, lsm_volume **new_volume, char **job, lsm_flag flags); /** * Resize an existing volume. * @param[in] conn Valid connection @see lsm_connect_password * @param[in] volume volume to re-size * @param[in] new_size New size of volume * @param[out] resized_volume Pointer to newly re-sized lun. * @param[out] job Indicates job id * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async. , else error * code */ int LSM_DLL_EXPORT lsm_volume_resize(lsm_connect *conn, lsm_volume * volume, uint64_t new_size, lsm_volume **resized_volume, char **job, lsm_flag flags); /** * Replicates a volume * @param[in] conn Valid connection @see lsm_connect_password * @param[in] pool Valid pool * @param[in] rep_type Type of replication lsm_replication_type * @param[in] volume_src Which volume to replicate * @param[in] name Human recognizable name (not all arrays * support) * @param[out] new_replicant New replicated volume lsm_volume_t * @param[out] job Indicates job id * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async. 
, else error * code */ int LSM_DLL_EXPORT lsm_volume_replicate(lsm_connect *conn, lsm_pool *pool, lsm_replication_type rep_type, lsm_volume *volume_src, const char *name, lsm_volume **new_replicant, char **job, lsm_flag flags); /** * Unit of block size for the replicate range method. * @param[in] conn Valid connection * @param[in] system Valid lsm_system * @param[out] bs Block size * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_volume_replicate_range_block_size(lsm_connect *conn, lsm_system *system, uint32_t *bs, lsm_flag flags); /** * Replicates a portion of a volume to a volume. * @param[in] conn Valid connection * @param[in] rep_type Replication type * @param[in] source Source volume * @param[in] dest Destination volume (can be same as source) * @param[in] ranges An array of block ranges * @param[in] num_ranges Number of entries in ranges. * @param[out] job Indicates job id * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async., else error * code */ int LSM_DLL_EXPORT lsm_volume_replicate_range(lsm_connect *conn, lsm_replication_type rep_type, lsm_volume *source, lsm_volume *dest, lsm_block_range **ranges, uint32_t num_ranges, char **job, lsm_flag flags); /** * Deletes a logical unit and data is lost! * @param[in] conn Valid connection @see lsm_connect_password * @param[in] volume Volume that is to be deleted. * @param[out] job Indicates job id * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async. , else error * code */ int LSM_DLL_EXPORT lsm_volume_delete(lsm_connect *conn, lsm_volume *volume, char **job, lsm_flag flags); /** * Set a Volume to online * @param[in] conn Valid connection @see lsm_connect_password * @param[in] volume Volume that is to be placed online * @param[in] flags Reserved for future use, must be zero. 
* @return LSM_ERR_OK on success, else error * code */ int LSM_DLL_EXPORT lsm_volume_enable(lsm_connect *conn, lsm_volume *volume, lsm_flag flags); /** * Set a Volume to offline * @param[in] conn Valid connection @see lsm_connect_password * @param[in] volume Volume that is to be placed online * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error * code */ int LSM_DLL_EXPORT lsm_volume_disable(lsm_connect *conn, lsm_volume * volume, lsm_flag flags); /** * Set the username password for CHAP authentication, inbound and outbound. * @param conn Valid connection pointer * @param init_id Initiator ID * @param in_user inbound user name * @param in_password inbound password * @param out_user outbound user name * @param out_password outbound password * @param flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error * code. */ int LSM_DLL_EXPORT lsm_iscsi_chap_auth(lsm_connect *conn, const char *init_id, const char *in_user, const char *in_password, const char *out_user, const char *out_password, lsm_flag flags); /** * Retrieves a list of access groups. * @param[in] conn Valid connection @see lsm_connect_password * @param[in] search_key Search key (NULL for all) * @param[in] search_value Search value * @param[out] groups Array of access groups * @param[out] group_count Size of array * @param[in] flags Reserved set to zero * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_access_group_list(lsm_connect *conn, const char *search_key, const char *search_value, lsm_access_group **groups[], uint32_t *group_count, lsm_flag flags); /** * Creates a new access group with one initiator in it. 
* @param[in] conn Valid connection @see lsm_connect_password * @param[in] name Name of access group * @param[in] init_id Initiator id to be added to group * @param[in] init_type Initiator type * @param[in] system System to create access group for * @param[out] access_group Returned access group * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_access_group_create(lsm_connect *conn, const char *name, const char *init_id, lsm_access_group_init_type init_type, lsm_system * system, lsm_access_group **access_group, lsm_flag flags); /** * Deletes an access group. * @param[in] conn Valid connection @see lsm_connect_password * @param[in] access_group Group to delete * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_access_group_delete(lsm_connect *conn, lsm_access_group * access_group, lsm_flag flags); /** * Adds an initiator to the access group * @param[in] conn Valid connection @see lsm_connect_password * @param[in] access_group Group to modify * @param[in] init_id Initiator to add to group * @param[in] init_type Type of initiator * @param[out] updated_access_group Updated access group * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT \ lsm_access_group_initiator_add(lsm_connect *conn, lsm_access_group *access_group, const char *init_id, lsm_access_group_init_type init_type, lsm_access_group **updated_access_group, lsm_flag flags); /** * Removes an initiator from an access group. * @param[in] conn Valid connection @see lsm_connect_password * @param[in] access_group Group to modify * @param[in] initiator_id Initiator to delete from group * @param[in] init_type Type of initiator, enumerated type * @param[out] updated_access_group Updated access group * @param[in] flags Reserved for future use, must be zero. 
* @return[in] LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_access_group_initiator_delete(lsm_connect *conn, lsm_access_group *access_group, const char *initiator_id, lsm_access_group_init_type init_type, lsm_access_group **updated_access_group, lsm_flag flags); /** * Grants access to a volume for the specified group * @param[in] conn Valid connection * @param[in] access_group Valid group pointer * @param[in] volume Valid volume pointer * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_volume_mask(lsm_connect *conn, lsm_access_group *access_group, lsm_volume *volume, lsm_flag flags); /** * Revokes access to a volume for the specified group * @param[in] conn Valid connection * @param[in] access_group Valid group pointer * @param[in] volume Valid volume pointer * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_volume_unmask(lsm_connect *conn, lsm_access_group *access_group, lsm_volume *volume, lsm_flag flags); /** * Returns those volumes that the specified group has access to. * @param[in] conn Valid connection * @param[in] group Valid group * @param[out] volumes An array of volumes * @param[out] count Number of volumes * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_volumes_accessible_by_access_group(lsm_connect *conn, lsm_access_group *group, lsm_volume **volumes[], uint32_t *count, lsm_flag flags); /** * Retrieves the access groups that have access to the specified volume. * @param[in] conn Valid connection * @param[in] volume Valid volume * @param[out] groups An array of access groups * @param[out] group_count Number of access groups * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason. 
*/ int LSM_DLL_EXPORT lsm_access_groups_granted_to_volume(lsm_connect *conn, lsm_volume *volume, lsm_access_group **groups[], uint32_t *group_count, lsm_flag flags); /** * Returns 1 if the specified volume has child dependencies. * @param[in] conn Valid connection * @param[in] volume Valid volume * @param[out] yes 1 == Yes, 0 == No * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_volume_child_dependency(lsm_connect *conn, lsm_volume *volume, uint8_t *yes, lsm_flag flags); /** * Instructs the array to remove all child dependencies by replicating * required storage. * @param[in] conn Valid connection * @param[in] volume Valid volume * @param[out] job Job id * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT \ lsm_volume_child_dependency_delete(lsm_connect *conn, lsm_volume *volume, char **job, lsm_flag flags); /** * Retrieves information about the different arrays accessible. * NOTE: Free returned systems by calling to lsm * @param[in] conn Valid connection * @param[out] systems Array of lsm_system * @param[out] system_count Number of systems * @param[in] flags Reserved set to zero * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_system_list(lsm_connect *conn, lsm_system ** systems[], uint32_t * system_count, lsm_flag flags); /** * Retrieves information about the available file systems. 
* @param[in] conn Valid connection * @param[in] search_key Search key (NULL for all) * @param[in] search_value Search value * @param[out] fs Array of lsm_fs * @param[out] fs_count Number of file systems * @param[in] flags Reserved set to zero * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_fs_list(lsm_connect *conn, const char *search_key, const char *search_value, lsm_fs **fs[], uint32_t *fs_count, lsm_flag flags); /** * Creates a new file system from the specified pool * @param[in] conn Valid connection * @param[in] pool Valid pool * @param[in] name File system name * @param[in] size_bytes Size of file system in bytes * @param[out] fs Newly created fs * @param[out] job Job id if job is async. * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async. , * else error code */ int LSM_DLL_EXPORT lsm_fs_create(lsm_connect *conn, lsm_pool * pool, const char *name, uint64_t size_bytes, lsm_fs **fs, char **job, lsm_flag flags); /** * Deletes a file system * @param[in] conn Valid connection * @param fs File system to delete * @param job Job id if job is created async. * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async. , * else error code */ int LSM_DLL_EXPORT lsm_fs_delete(lsm_connect *conn, lsm_fs *fs, char **job, lsm_flag flags); /** * Clones an existing file system * @param conn Valid connection * @param src_fs Source file system * @param name Name of new file system * @param optional_ss Optional snapshot to base clone from * @param cloned_fs Newly cloned file system record * @param job Job id if operation is async. * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on succees, LSM_ERR_JOB_STARTED if async., else * error code. 
*/ int LSM_DLL_EXPORT lsm_fs_clone(lsm_connect *conn, lsm_fs *src_fs, const char *name, lsm_fs_ss *optional_ss, lsm_fs **cloned_fs, char **job, lsm_flag flags); /** * Checks to see if the specified file system has a child dependency. * @param[in] conn Valid connection * @param[in] fs Specific file system * @param[in] files Specific files to check (NULL OK) * @param[out] yes Zero indicates no, else yes * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error * code. */ int LSM_DLL_EXPORT lsm_fs_child_dependency(lsm_connect *conn, lsm_fs *fs, lsm_string_list *files, uint8_t *yes, lsm_flag flags); /** * Removes child dependencies by duplicating the required storage to remove. * Note: This could take a long time to complete based on dependencies. * @param[in] conn Valid connection * @param[in] fs File system to remove dependencies for * @param[in] files Specific files to check (NULL OK) * @param[out] job Job id for async. identification * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async. , * else error code */ int LSM_DLL_EXPORT lsm_fs_child_dependency_delete(lsm_connect *conn, lsm_fs *fs, lsm_string_list *files, char **job, lsm_flag flags); /** * Resizes a file system * @param[in] conn Valid connection * @param[in] fs File system to re-size * @param[in] new_size_bytes New size of fs * @param[out] rfs File system information for re-sized fs * @param[out] job_id Job id for async. identification * @param[in] flags Reserved for future use, must be zero. * @return @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async. , * else error code */ int LSM_DLL_EXPORT lsm_fs_resize(lsm_connect *conn, lsm_fs *fs, uint64_t new_size_bytes, lsm_fs **rfs, char **job_id, lsm_flag flags); /** * Clones a file on a file system. 
* @param[in] conn Valid connection * @param[in] fs File system which file resides * @param[in] src_file_name Source file relative name & path * @param[in] dest_file_name Dest. file relative name & path * @param[in] snapshot Optional backing snapshot * @param[out] job Job id for async. operation * @param[in] flags Reserved for future use, must be zero. * @return @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async. , * else error code */ int LSM_DLL_EXPORT lsm_fs_file_clone(lsm_connect *conn, lsm_fs *fs, const char *src_file_name, const char *dest_file_name, lsm_fs_ss *snapshot, char **job, lsm_flag flags); /** * Return a list of snapshots * @param[in] conn Valid connection * @param[in] fs File system to check for snapshots * @param[out] ss An array of snapshot pointers * @param[out] ss_count Number of elements in the array * @param[in] flags Reserved set to zero * @return LSM_ERR_OK on success, else error reason */ int LSM_DLL_EXPORT lsm_fs_ss_list(lsm_connect *conn, lsm_fs *fs, lsm_fs_ss ** ss[], uint32_t * ss_count, lsm_flag flags); /** * Creates a snapshot * @param[in] c Valid connection * @param[in] fs File system to snapshot * @param[in] name Name of snap shot * @param[out] snapshot Snapshot that was created * @param[out] job Job id if the operation is async. * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async., * else error code */ int LSM_DLL_EXPORT lsm_fs_ss_create(lsm_connect *c, lsm_fs *fs, const char *name, lsm_fs_ss **snapshot, char **job, lsm_flag flags); /** * Deletes a snapshot * @param[in] c Valid connection * @param[in] fs File system * @param[in] ss Snapshot to delete * @param[out] job Job id if the operation is async. * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async., else error * code. 
*/ int LSM_DLL_EXPORT lsm_fs_ss_delete(lsm_connect *c, lsm_fs *fs, lsm_fs_ss *ss, char **job, lsm_flag flags); /** * Restores a file system or files to a previous state as specified in the * snapshot. * @param c Valid connection * @param fs File system which contains the snapshot * @param ss Snapshot to restore to * @param files Optional list of files to restore * @param restore_files Optional list of file names to restore to * @param all_files 0 = False else True * @param job Job id if operation is async. * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, LSM_ERR_JOB_STARTED if async., else error * code */ int LSM_DLL_EXPORT lsm_fs_ss_restore(lsm_connect *c, lsm_fs *fs, lsm_fs_ss *ss, lsm_string_list *files, lsm_string_list *restore_files, int all_files, char **job, lsm_flag flags); /** * Returns the types of NFS client authentication the array supports. * @param[in] c Valid connection * @param[out] types List of types * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error code. */ int LSM_DLL_EXPORT lsm_nfs_auth_types(lsm_connect *c, lsm_string_list **types, lsm_flag flags); /** * Lists the nfs exports on the specified array. * @param[in] c Valid connection * @param[in] search_key Search key (NULL for all) * @param[in] search_value Search value * @param[out] exports An array of lsm_nfs_export * @param[out] count Number of items in array * @param[in] flags Reserved set to zero * @return LSM_ERR_OK on success else error code. */ int LSM_DLL_EXPORT lsm_nfs_list(lsm_connect *c, const char *search_key, const char *search_value, lsm_nfs_export **exports[], uint32_t *count, lsm_flag flags); /** * Creates or modifies an NFS export. 
* @param[in] c Valid connection * @param[in] fs_id File system ID to export via NFS * @param[in] export_path Export path * @param[in] root_list List of hosts that have root access * @param[in] rw_list List of hosts that have read/write access * @param[in] ro_list List of hosts that have read only access * @param[in] anon_uid UID to map to anonymous * @param[in] anon_gid GID to map to anonymous * @param[in] auth_type Array specific authentication types * @param[in] options Array specific options * @param[out] exported Export record * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_nfs_export_fs(lsm_connect *c, const char *fs_id, const char *export_path, lsm_string_list *root_list, lsm_string_list *rw_list, lsm_string_list *ro_list, uint64_t anon_uid, uint64_t anon_gid, const char *auth_type, const char *options, lsm_nfs_export **exported, lsm_flag flags); /** * Delete the export. * @param[in] c Valid connection * @param[in] e NFS export to remove * @param[in] flags Reserved for future use, must be zero. * @return LSM_ERR_OK on success else error code. */ int LSM_DLL_EXPORT lsm_nfs_export_delete(lsm_connect *c, lsm_nfs_export * e, lsm_flag flags); /** * Retrieve a list of target ports * @param[in] c Valid connection * @param[in] search_key Search key (NULL for all) * @param[in] search_value Search value * @param[out] target_ports Array of target ports * @param[out] count Number of target ports * @param[in] flags Reserved, set to 0 * @return LSM_ERR_OK on success else error reason */ int LSM_DLL_EXPORT lsm_target_port_list(lsm_connect *c, const char *search_key, const char *search_value, lsm_target_port **target_ports[], uint32_t *count, lsm_flag flags); /** * Retrieves the pool id that the volume is derived from. New in version 1.2. * @param[in] c Valid connection * @param[in] v Volume ptr. 
* @param[out] raid_type Enum of lsm_volume_raid_type * @param[out] strip_size Size of the strip on disk or other storage extent. * @param[out] disk_count Count of disks of RAID group(s) where this volume * allocated from. * @param[out] min_io_size Minimum I/O size, also the preferred I/O size * of random I/O. * @param[out] opt_io_size Optimal I/O size, also the preferred I/O size * of sequential I/O. * @param[in] flags Reserved, set to 0 * @return LSM_ERR_OK on success else error reason. */ int LSM_DLL_EXPORT lsm_volume_raid_info(lsm_connect *c, lsm_volume *volume, lsm_volume_raid_type *raid_type, uint32_t *strip_size, uint32_t *disk_count, uint32_t *min_io_size, uint32_t *opt_io_size, lsm_flag flags); /** * Retrieves the membership of given pool. New in version 1.2. * @param[in] c Valid connection * @param[in] pool The lsm_pool ptr. * @param[out] raid_type * Enum of lsm_volume_raid_type. * @param[out] member_type * Enum of lsm_pool_member_type. * @param[out] member_ids * The pointer to lsm_string_list pointer. * When 'member_type' is LSM_POOL_MEMBER_TYPE_POOL, * the 'member_ids' will contain a list of parent Pool * IDs. * When 'member_type' is LSM_POOL_MEMBER_TYPE_DISK, * the 'member_ids' will contain a list of disk IDs. * When 'member_type' is LSM_POOL_MEMBER_TYPE_OTHER or * LSM_POOL_MEMBER_TYPE_UNKNOWN, the member_ids should * be NULL. * Need to use lsm_string_list_free() to free this memory. * @param[in] flags Reserved, set to 0 * @return LSM_ERR_OK on success else error reason. */ int LSM_DLL_EXPORT lsm_pool_member_info(lsm_connect *c, lsm_pool *pool, lsm_volume_raid_type *raid_type, lsm_pool_member_type *member_type, lsm_string_list **member_ids, lsm_flag flags); /** * Query all supported RAID types and strip sizes which could be used * in lsm_volume_raid_create() functions. * New in version 1.2, only available for hardware RAID cards. * @param[in] c Valid connection * @param[in] system * The lsm_sys type. 
* @param[out] supported_raid_types * The pointer of uint32_t array. Containing * lsm_volume_raid_type values. * You need to free this memory by yourself. * @param[out] supported_raid_type_count * The pointer of uint32_t. Indicate the item count of * supported_raid_types array. * @param[out] supported_strip_sizes * The pointer of uint32_t array. Containing * all supported strip sizes. * You need to free this memory by yourself. * @param[out] supported_strip_size_count * The pointer of uint32_t. Indicate the item count of * supported_strip_sizes array. * @param[in] flags Reserved, set to 0 * @return LSM_ERR_OK on success else error reason. */ int LSM_DLL_EXPORT lsm_volume_raid_create_cap_get(lsm_connect *c, lsm_system *system, uint32_t **supported_raid_types, uint32_t *supported_raid_type_count, uint32_t **supported_strip_sizes, uint32_t *supported_strip_size_count, lsm_flag flags); /** * Create a disk RAID pool and allocate entire full space to new volume. * New in version 1.2, only available for hardware RAID cards. * @param[in] c Valid connection * @param[in] name String. Name for the new volume. It might be ignored or * altered on some hardwardware raid cards in order to fit * their limitation. * @param[in] raid_type * Enum of lsm_volume_raid_type. Please refer to the returns * of lsm_volume_raid_create_cap_get() function for * supported strip sizes. * @param[in] disks * An array of lsm_disk types * @param[in] disk_count * The count of lsm_disk in 'disks' argument. * Count starts with 1. * @param[in] strip_size * uint32_t. The strip size in bytes. Please refer to * the returns of lsm_volume_raid_create_cap_get() function * for supported strip sizes. * @param[out] new_volume * Newly created volume, Pointer to the lsm_volume type * pointer. * @param[in] flags Reserved, set to 0 * @return LSM_ERR_OK on success else error reason. 
*/ int LSM_DLL_EXPORT lsm_volume_raid_create(lsm_connect *c, const char *name, lsm_volume_raid_type raid_type, lsm_disk *disks[], uint32_t disk_count, uint32_t strip_size, lsm_volume **new_volume, lsm_flag flags); #ifdef __cplusplus } #endif #endif /* LIBSTORAGEMGMT_H */ libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_snapshot.h0000664000175000017500000000407212537737032025163 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LSM_SNAPSHOT_H #define LSM_SNAPSHOT_H #include "libstoragemgmt_common.h" #ifdef __cplusplus extern "C" { #endif /** * Frees a file system snapshot record. * @param ss Snapshot record * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_fs_ss_record_free(lsm_fs_ss *ss); /** * Copies a file system snapshot record. * @param source Source to copy * @return Copy of source record snapshot */ lsm_fs_ss LSM_DLL_EXPORT *lsm_fs_ss_record_copy(lsm_fs_ss *source); /** * Frees an array of snapshot record. * @param ss An array of snapshot record pointers. * @param size Number of snapshot records. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_fs_ss_record_array_free(lsm_fs_ss *ss[], uint32_t size); /** * Returns the file system snapshot id. * @param ss The snapshot record * @return Pointer to id. 
*/ const char LSM_DLL_EXPORT *lsm_fs_ss_id_get(lsm_fs_ss *ss); /** * Returns the name. * @param ss The file system snapshot record * @return The Name */ const char LSM_DLL_EXPORT *lsm_fs_ss_name_get(lsm_fs_ss *ss); /** * Returns the timestamp * @param ss The file system snapshot record. * @return The timestamp the file system snapshot was taken */ uint64_t LSM_DLL_EXPORT lsm_fs_ss_time_stamp_get(lsm_fs_ss *ss); #ifdef __cplusplus } #endif #endif libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_pool.h0000664000175000017500000000661012537737032024275 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LIBSTORAGEMGMT_POOL_H #define LIBSTORAGEMGMT_POOL_H #include "libstoragemgmt_common.h" #ifdef __cplusplus extern "C" { #endif /** * Frees the memory for each of the pools and then the pool array itself. * @param pa Pool array to free. * @param size Size of the pool array. * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_pool_record_array_free(lsm_pool *pa[], uint32_t size); /** * Frees the memory for an individual pool * @param p Valid pool * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_pool_record_free(lsm_pool *p); /** * Copies a lsm_pool record * @param to_be_copied Record to be copied * @return NULL on memory exhaustion, else copy. 
*/ lsm_pool LSM_DLL_EXPORT *lsm_pool_record_copy(lsm_pool *to_be_copied); /** * Retrieves the name from the pool. * Note: Returned value is only valid as long as p is valid!. * @param p Pool * @return The name of the pool. */ char LSM_DLL_EXPORT *lsm_pool_name_get(lsm_pool *p); /** * Retrieves the system wide unique identifier for the pool. * Note: Returned value is only valid as long as p is valid!. * @param p Pool * @return The System wide unique identifier. */ char LSM_DLL_EXPORT *lsm_pool_id_get(lsm_pool *p); /** * Retrieves the total space for the pool. * @param p Pool * @return Total space of the pool. */ uint64_t LSM_DLL_EXPORT lsm_pool_total_space_get(lsm_pool *p); /** * Retrieves the remaining free space in the pool. * @param p Pool * @return The amount of free space. */ uint64_t LSM_DLL_EXPORT lsm_pool_free_space_get(lsm_pool *p); /** * Retrieve the status for the Pool. * @param s Pool to retrieve status for * @return Pool status which is a bit sensitive field, returns UINT64_MAX on * bad pool pointer. */ uint64_t LSM_DLL_EXPORT lsm_pool_status_get(lsm_pool *s); /** * Retrieve the status info for the Pool. * @param s Pool to retrieve status for * @return Pool status info which is a character string. */ const char LSM_DLL_EXPORT *lsm_pool_status_info_get(lsm_pool *s); /** * Retrieve the system id for the specified pool. * @param p Pool pointer * @return System ID */ char LSM_DLL_EXPORT *lsm_pool_system_id_get(lsm_pool *p); /** * Retrieve what the pool can be used to create * @param p Pool pointer * @return Usage value */ uint64_t LSM_DLL_EXPORT lsm_pool_element_type_get(lsm_pool *p); /** * Retrieve what the pool cannot be used for. * @param p Pool pointer * @return bitmap of actions not supported. 
*/ uint64_t LSM_DLL_EXPORT lsm_pool_unsupported_actions_get(lsm_pool *p); #ifdef __cplusplus } #endif #endif /* LIBSTORAGEMGMT_POOL_H */ libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_error.h0000664000175000017500000001471412537737032024461 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LIBSTORAGEMGMTERROR_H #define LIBSTORAGEMGMTERROR_H #include "libstoragemgmt_common.h" #ifdef __cplusplus extern "C" { #endif /** @file libstoragemgmt_error.h */ /**< \enum lsm_error_number Possible enumerated return codes from library */ typedef enum { LSM_ERR_OK = 0, /**^ OK */ LSM_ERR_LIB_BUG = 1, /**^ Library BUG */ LSM_ERR_PLUGIN_BUG = 2, /**^ Plugin BUG */ LSM_ERR_JOB_STARTED = 7, /**^ Operation has started */ LSM_ERR_TIMEOUT = 11, /**^ Plug-in is un-responsive */ LSM_ERR_DAEMON_NOT_RUNNING = 12, /**^ Daemon is not running */ LSM_ERR_NAME_CONFLICT = 50, /**^ Name exists */ LSM_ERR_EXISTS_INITIATOR = 52, /**^ Initiator exists in another access group */ LSM_ERR_INVALID_ARGUMENT = 101, /**^ Precondition checks failed */ LSM_ERR_NO_STATE_CHANGE = 125, /**^ Operation completed with no change in array state */ LSM_ERR_NETWORK_CONNREFUSED = 140, /**^ Host on network, but not allowing connection */ LSM_ERR_NETWORK_HOSTDOWN = 141, /**^ Host unreachable on network */ LSM_ERR_NETWORK_ERROR = 142, /**^ Generic network error */ 
LSM_ERR_NO_MEMORY = 152, /**^ Memory allocation failure */ LSM_ERR_NO_SUPPORT = 153, /**^ Feature not supported */ LSM_ERR_IS_MASKED = 160, /**^ Volume masked to Access Group*/ LSM_ERR_NOT_FOUND_ACCESS_GROUP = 200, /**^ Specified access group not found */ LSM_ERR_NOT_FOUND_FS = 201, /**^ Specified FS not found */ LSM_ERR_NOT_FOUND_JOB = 202, /**^ Specified JOB not found */ LSM_ERR_NOT_FOUND_POOL = 203, /**^ Specified POOL not found */ LSM_ERR_NOT_FOUND_FS_SS = 204, /**^ Specified snap shot not found */ LSM_ERR_NOT_FOUND_VOLUME = 205, /**^ Specified volume not found */ LSM_ERR_NOT_FOUND_NFS_EXPORT = 206, /**^ NFS export not found */ LSM_ERR_NOT_FOUND_SYSTEM = 208, /**^ System not found */ LSM_ERR_NOT_FOUND_DISK = 209, LSM_ERR_NOT_LICENSED = 226, /**^ Need license for feature */ LSM_ERR_NO_SUPPORT_ONLINE_CHANGE = 250, /**^ Take offline before performing operation */ LSM_ERR_NO_SUPPORT_OFFLINE_CHANGE = 251, /**^ Needs to be online to perform operation */ LSM_ERR_PLUGIN_AUTH_FAILED = 300, /**^ Authorization failed */ LSM_ERR_PLUGIN_IPC_FAIL = 301, /**^ Inter-process communication between client & out of process plug-in encountered connection errors.**/ LSM_ERR_PLUGIN_SOCKET_PERMISSION = 307, /**^ Incorrect permission on UNIX domain socket used for IPC */ LSM_ERR_PLUGIN_NOT_EXIST = 311, /**^ Plug-in does not appear to exist */ LSM_ERR_NOT_ENOUGH_SPACE = 350, /**^ Insufficient space */ LSM_ERR_TRANSPORT_COMMUNICATION = 400, /**^ Error comunicating with plug-in */ LSM_ERR_TRANSPORT_SERIALIZATION = 401, /**^ Transport serialization error */ LSM_ERR_TRANSPORT_INVALID_ARG = 402, /**^ Parameter transported over IPC is invalid */ LSM_ERR_LAST_INIT_IN_ACCESS_GROUP = 502, LSM_ERR_UNSUPPORTED_SEARCH_KEY = 510, /**^ Unsupport search key */ LSM_ERR_EMPTY_ACCESS_GROUP = 511, LSM_ERR_POOL_NOT_READY = 512, LSM_ERR_DISK_NOT_FREE = 513, } lsm_error_number; typedef struct _lsm_error lsm_error; typedef lsm_error *lsm_error_ptr; /** * Gets the last error structure * Note: @see 
lsm_error_free to release memory * @param c Connection pointer. * @return Error pointer, Null if no error exists! */ lsm_error_ptr LSM_DLL_EXPORT lsm_error_last_get(lsm_connect * c); /** * Frees the error record! * @param err The error to free! * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_error_free(lsm_error_ptr err); /** * Retrieves the error number from the error. * @param e The lsm_error_ptr * @return -1 if e is not a valid error pointer, else error number. */ lsm_error_number LSM_DLL_EXPORT lsm_error_number_get(lsm_error_ptr e); /** * Retrieves the error message from the error. * Note: The returned value is only valid as long as the e is valid, in * addition the function will return NULL if e is invalid. To remove the * ambiguity call lsm_error_number_get and check return code. * @param e The lsm_error_ptr * @return NULL if message data does not exist, else error message. */ char LSM_DLL_EXPORT *lsm_error_message_get(lsm_error_ptr e); /** * Retrieves the exception message from the error. * Note: The returned value is only valid as long as the e is valid, in * addition the function will return NULL if e is invalid. To remove the * ambiguity call lsm_error_number_get and check return code. * @param e The lsm_error_ptr * @return NULL if exception does not exist, else error exception. */ char LSM_DLL_EXPORT *lsm_error_exception_get(lsm_error_ptr e); /** * Retrieves the error message from the error. * Note: The returned value is only valid as long as the e is valid, in * addition the function will return NULL if e is invalid. To remove the * ambiguity call lsm_error_number_get and check return code. * @param e The lsm_error_ptr * @return NULL if does not exist, else debug message. */ char LSM_DLL_EXPORT *lsm_error_debug_get(lsm_error_ptr e); /** * Retrieves the debug data from the error. * Note: The returned value is only valid as long as the e is valid, in * addition the function will return NULL if e is invalid. 
To remove the * ambiguity call lsm_error_number_get and check return code. * @param e The lsm_error_ptr * @param[out] size Number of bytes of data returned. * @return NULL if does not exist, else debug message. */ void LSM_DLL_EXPORT *lsm_error_debug_data_get(lsm_error_ptr e, uint32_t * size); #ifdef __cplusplus } #endif #endif /* LIBSTORAGEMGMTERROR_H */ libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_version.h.in0000664000175000017500000000171512537737032025417 00000000000000/* * Copyright (C) 2011-2013 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LSM_VERSION_H_ #define LSM_VERSION_H_ #define LSM_MAJOR @LIBSM_MAJOR_VERSION@ #define LSM_MINOR @LIBSM_MINOR_VERSION@ #define LSM_MICRO @LIBSM_MICRO_VERSION@ #define LSM_VERSION ((LSM_MAJOR * 10000) + (LSM_MINOR * 100) + LSM_MICRO) #endif libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_nfsexport.h0000664000175000017500000001625612537737032025363 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. 
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LSM_NFS_EXPORT_H #define LSM_NFS_EXPORT_H #include "libstoragemgmt_types.h" #ifdef __cplusplus extern "C" { #endif /** * Because the nfs export functions use an unsigned data type these values * will be represented as (2**64-1 and 2**64-2 respectively) */ #define ANON_UID_GID_NA -1 #define ANON_UID_GID_ERROR (ANON_UID_GID_NA - 1) /** * Allocated memory for a NFS export record * @param id Export ID (Set to NULL when creating new export) * @param fs_id File system ID that is exported * @param export_path The desired path for the export (May be NULL) * @param auth NFS client authentication type (May be NULL) * @param root List of hosts that have root access (May be NULL) * @param rw List of hosts that have read/write access (May be NULL) * @param ro List of hosts that have read only access (May be NULL) * @param anonuid User id that should be mapped to anonymous * (Valid or set to ANON_UID_GID_NA). * @param anongid Group id that should be mapped to anonymous * (Valid or set to ANON_UID_GID_NA) * @param options String of options passed to array * @param plugin_data Reserved for plug-in use * @return Valid export pointer, else NULL on error. */ lsm_nfs_export LSM_DLL_EXPORT * lsm_nfs_export_record_alloc(const char *id, const char *fs_id, const char *export_path, const char *auth, lsm_string_list *root, lsm_string_list *rw, lsm_string_list *ro, uint64_t anonuid, uint64_t anongid, const char *options, const char *plugin_data); /** * Allocated the memory for an array of NFS export records. 
* @param size Number of elements * @return Allocated memory, NULL on error */ lsm_nfs_export LSM_DLL_EXPORT ** lsm_nfs_export_record_array_alloc(uint32_t size); /** * Frees the memory for a NFS export record. * @param exp * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_nfs_export_record_free(lsm_nfs_export *exp); /** * Frees the memory for the NFS export array and the memory for each entry * @param exps Memory to free * @param size Number of entries * @return LSM_ERR_OK on success, else error reason. * */ int LSM_DLL_EXPORT lsm_nfs_export_record_array_free(lsm_nfs_export * exps[], uint32_t size); /** * Duplicates the source and returns the copy. * @param source Source record to copy * @return Copy of source, else NULL one error. */ lsm_nfs_export LSM_DLL_EXPORT * lsm_nfs_export_record_copy(lsm_nfs_export *source); /** * Returns the ID * @param exp Valid nfs export record * @return Pointer to ID */ const char LSM_DLL_EXPORT *lsm_nfs_export_id_get(lsm_nfs_export *exp); int LSM_DLL_EXPORT lsm_nfs_export_id_set(lsm_nfs_export *exp, const char *ep); /** * Returns the file system id * @param exp Valid nfs export record * @return Pointer to file system id */ const char LSM_DLL_EXPORT *lsm_nfs_export_fs_id_get(lsm_nfs_export *exp); int LSM_DLL_EXPORT lsm_nfs_export_fs_id_set(lsm_nfs_export *exp, const char *fs_id); /** * Returns the export path * @param exp Valid nfs export record * @return Pointer to export path */ const char LSM_DLL_EXPORT * lsm_nfs_export_export_path_get(lsm_nfs_export *exp); int LSM_DLL_EXPORT lsm_nfs_export_export_path_set(lsm_nfs_export *exp, const char *export_path); /** * Returns the client authentication type * @param exp Valid nfs export record * @return Pointer to authentication type */ const char LSM_DLL_EXPORT *lsm_nfs_export_auth_type_get(lsm_nfs_export * exp); int LSM_DLL_EXPORT lsm_nfs_export_auth_type_set(lsm_nfs_export *exp, const char *value); /** * Returns the list of hosts that have root access * @param 
exp Valid nfs export record * @return list of hosts. */ lsm_string_list LSM_DLL_EXPORT * lsm_nfs_export_root_get(lsm_nfs_export * exp); int LSM_DLL_EXPORT lsm_nfs_export_root_set(lsm_nfs_export *exp, lsm_string_list *value); /** * Returns the list of hosts that have read/write access to export. * @param exp Valid nfs export record * @return list of hosts. */ lsm_string_list LSM_DLL_EXPORT * lsm_nfs_export_read_write_get(lsm_nfs_export *exp); int LSM_DLL_EXPORT lsm_nfs_export_read_write_set(lsm_nfs_export *exp, lsm_string_list *value); /** * Returns the list of hosts that have read only access to export. * @param exp Valid nfs export record * @return list of hosts */ lsm_string_list LSM_DLL_EXPORT * lsm_nfs_export_read_only_get(lsm_nfs_export *exp); int LSM_DLL_EXPORT lsm_nfs_export_read_only_set(lsm_nfs_export *exp, lsm_string_list *value); /** * Returns the id which is to be mapped to anonymous id * @param exp Valid nfs export record * @return ANON_UID_GID_NA value is returned when this isn't set, else value * mapped to anonymous group id. For errors ANON_UID_GID_ERROR is returned. */ uint64_t LSM_DLL_EXPORT lsm_nfs_export_anon_uid_get(lsm_nfs_export *exp); int LSM_DLL_EXPORT lsm_nfs_export_anon_uid_set(lsm_nfs_export *exp, uint64_t value); /** * Returns the group id which is to be mapped to anonymous group * @param exp Valid nfs export record * @return ANON_UID_GID_NA value is returned when this isn't set, else value * mapped to anonymous group id. For errors ANON_UID_GID_ERROR is returned. */ uint64_t LSM_DLL_EXPORT lsm_nfs_export_anon_gid_get(lsm_nfs_export *exp); int LSM_DLL_EXPORT lsm_nfs_export_anon_gid_set(lsm_nfs_export *exp, uint64_t value); /** * Returns the options for this export. * @param exp Valid nfs export record * @return Options value, NULL if not applicable. 
*/ const char LSM_DLL_EXPORT *lsm_nfs_export_options_get(lsm_nfs_export *exp); int LSM_DLL_EXPORT lsm_nfs_export_options_set(lsm_nfs_export *exp, const char *value); #ifdef __cplusplus } #endif #endif libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_targetport.h0000664000175000017500000000620512537737032025517 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LIBSTORAGEMGMT_TARGET_PORT_H #define LIBSTORAGEMGMT_TARGET_PORT_H #include "libstoragemgmt_common.h" #ifdef __cplusplus extern "C" { #endif /** * Duplicated a target port record. * NOTE: Make sure to free resources with a call to lsm_target_port_record_free * @param tp Record to duplicate * @return NULL on memory allocation failure, else duplicated record. */ lsm_target_port LSM_DLL_EXPORT *lsm_target_port_copy(lsm_target_port *tp); /** * Frees the resources for a lsm_system * @param tp Record to release * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_target_port_record_free(lsm_target_port *tp); /** * Frees the resources for an array for lsm_target_port * @param tp Array to release memory for * @param size Number of elements. * @return LSM_ERR_OK on success, else error reason. 
* */ int LSM_DLL_EXPORT lsm_target_port_record_array_free(lsm_target_port * tp[], uint32_t size); /** * Returns the ID of the target port * @param tp lsm_target_port record * @return ID, NULL on error */ const char LSM_DLL_EXPORT *lsm_target_port_id_get(lsm_target_port *tp); /** * Returns the type of target port * @param tp lsm_target_port record * @return enumerated value */ lsm_target_port_type LSM_DLL_EXPORT lsm_target_port_type_get(lsm_target_port *tp); /** * Returns the service address * @param tp lsm_target_port record * @return Service address, NULL on error */ const char LSM_DLL_EXPORT * lsm_target_port_service_address_get(lsm_target_port *tp); /** * Returns the network address * @param tp lsm_target_port record * @return Network address, NULL on error */ const char LSM_DLL_EXPORT * lsm_target_port_network_address_get(lsm_target_port *tp); /** * Returns the physical address * @param tp lsm_target_port record * @return Physical address, NULL on error */ const char LSM_DLL_EXPORT * lsm_target_port_physical_address_get(lsm_target_port *tp); /** * Returns the physical name * @param tp lsm_target_port record * @return Physical name, NULL on error */ const char LSM_DLL_EXPORT * lsm_target_port_physical_name_get(lsm_target_port *tp); /** * Returns the system_id * @param tp lsm_target_port record * @return System id, NULL on error */ const char LSM_DLL_EXPORT * lsm_target_port_system_id_get(lsm_target_port *tp); #ifdef __cplusplus } #endif #endif libstoragemgmt-1.2.3/c_binding/include/libstoragemgmt/libstoragemgmt_disk.h0000664000175000017500000000670712537737032024265 00000000000000/* * Copyright (C) 2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson * */ #ifndef LIBSTORAGEMGMT_DISK_H #define LIBSTORAGEMGMT_DISK_H #include "libstoragemgmt_common.h" #ifdef __cplusplus extern "C" { #endif /** * Free the memory for a disk record * @param d Disk memory to free * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_disk_record_free(lsm_disk *d); /** * Copy a disk record * @param d Disk record to copy * @return Copy of disk record */ lsm_disk LSM_DLL_EXPORT *lsm_disk_record_copy(lsm_disk *d); /** * Free an array of disk records * @param disk Array of disk records * @param size Size of disk array * @return LSM_ERR_OK on success, else error reason. */ int LSM_DLL_EXPORT lsm_disk_record_array_free(lsm_disk *disk[], uint32_t size); /** * Returns the disk id * Note: Return value is valid as long as disk pointer is valid. It gets * freed when record is freed. * @param d Disk record of interest * @return String id */ const char LSM_DLL_EXPORT *lsm_disk_id_get(lsm_disk *d); /** * Returns the disk name * Note: Return value is valid as long as disk pointer is valid. It gets * freed when record is freed. * @param d Disk record of interest * @return Disk name */ const char LSM_DLL_EXPORT *lsm_disk_name_get(lsm_disk *d); /** * Returns the disk type (enumeration) * Note: Return value is valid as long as disk pointer is valid. It gets * freed when record is freed. * @param d Disk record of interest * @return Disk type */ lsm_disk_type LSM_DLL_EXPORT lsm_disk_type_get(lsm_disk *d); /** * Returns number of blocks for disk * Note: Return value is valid as long as disk pointer is valid. It gets * freed when record is freed. 
* @param d Disk record of interest * @return Number of logical blocks */ uint64_t LSM_DLL_EXPORT lsm_disk_number_of_blocks_get(lsm_disk *d); /** * Returns the block size * Note: Return value is valid as long as disk pointer is valid. It gets * freed when record is freed. * @param d Disk record of interest * @return Block size in bytes */ uint64_t LSM_DLL_EXPORT lsm_disk_block_size_get(lsm_disk *d); /** * Returns the disk status * Note: Return value is valid as long as disk pointer is valid. It gets * freed when record is freed. * @param d Disk record of interest * @return Status of the disk */ uint64_t LSM_DLL_EXPORT lsm_disk_status_get(lsm_disk *d); /** * Returns the system id * Note: Return value is valid as long as disk pointer is valid. It gets * freed when record is freed. * @param d Disk record of interest * @return Which system the disk belongs too. */ const char LSM_DLL_EXPORT *lsm_disk_system_id_get(lsm_disk *d); #ifdef __cplusplus } #endif #endif /* LIBSTORAGEMGMT_DISK_H */ libstoragemgmt-1.2.3/c_binding/include/Makefile.am0000664000175000017500000000025312537546123017063 00000000000000## Process this file with automake to produce Makefile.in ## Copyright (C) 2011 Red Hat, Inc. ## See COPYING.LIB for the License of this software SUBDIRS=libstoragemgmt libstoragemgmt-1.2.3/c_binding/include/Makefile.in0000664000175000017500000004610712542455445017106 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = c_binding/include DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = 
$(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. 
am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ 
INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ 
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ SUBDIRS = libstoragemgmt all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu c_binding/include/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu c_binding/include/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. 
$(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z 
"$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all 
variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/c_binding/lsm_plugin_ipc.cpp0000664000175000017500000025646312537737032017135 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #include "lsm_plugin_ipc.hpp" #include "lsm_datatypes.hpp" #include "lsm_ipc.hpp" #include "lsm_convert.hpp" #include "libstoragemgmt/libstoragemgmt_systems.h" #include "libstoragemgmt/libstoragemgmt_blockrange.h" #include "libstoragemgmt/libstoragemgmt_disk.h" #include "libstoragemgmt/libstoragemgmt_accessgroups.h" #include "libstoragemgmt/libstoragemgmt_fs.h" #include "libstoragemgmt/libstoragemgmt_snapshot.h" #include "libstoragemgmt/libstoragemgmt_nfsexport.h" #include "libstoragemgmt/libstoragemgmt_plug_interface.h" #include "libstoragemgmt/libstoragemgmt_targetport.h" #include "libstoragemgmt/libstoragemgmt_volumes.h" #include "libstoragemgmt/libstoragemgmt_pool.h" #include #include #include #include "util/qparams.h" #include //Forward decl. static int lsm_plugin_run(lsm_plugin_ptr plug); /** * Safe string wrapper * @param s Character array to convert to std::string * @return String representation. 
*/ static std::string ss(char *s) { if (s) { return std::string(s); } return std::string(); } void *lsm_data_type_copy(lsm_data_type t, void *item) { void *rc = NULL; if (item) { switch (t) { case (LSM_DATA_TYPE_ACCESS_GROUP): rc = lsm_access_group_record_copy((lsm_access_group *) item); break; case (LSM_DATA_TYPE_BLOCK_RANGE): rc = lsm_block_range_record_copy((lsm_block_range *) item); break; case (LSM_DATA_TYPE_FS): rc = lsm_fs_record_copy((lsm_fs *) item); break; case (LSM_DATA_TYPE_NFS_EXPORT): rc = lsm_nfs_export_record_copy((lsm_nfs_export *) item); break; case (LSM_DATA_TYPE_POOL): rc = lsm_pool_record_copy((lsm_pool *) item); break; case (LSM_DATA_TYPE_SS): rc = lsm_fs_ss_record_copy((lsm_fs_ss *) item); break; case (LSM_DATA_TYPE_STRING_LIST): rc = lsm_string_list_copy((lsm_string_list *) item); break; case (LSM_DATA_TYPE_SYSTEM): rc = lsm_system_record_copy((lsm_system *) item); break; case (LSM_DATA_TYPE_VOLUME): rc = lsm_volume_record_copy((lsm_volume *) item); break; case (LSM_DATA_TYPE_DISK): rc = lsm_disk_record_copy((lsm_disk *) item); break; default: break; } } return rc; } static Value job_handle(const Value & val, char *job) { std::vector < Value > r; r.push_back(Value(job)); r.push_back(val); return Value(r); } int lsm_register_plugin_v1(lsm_plugin_ptr plug, void *private_data, struct lsm_mgmt_ops_v1 *mgm_op, struct lsm_san_ops_v1 *san_op, struct lsm_fs_ops_v1 *fs_op, struct lsm_nas_ops_v1 *nas_op) { int rc = LSM_ERR_INVALID_ARGUMENT; if (LSM_IS_PLUGIN(plug)) { plug->private_data = private_data; plug->mgmt_ops = mgm_op; plug->san_ops = san_op; plug->fs_ops = fs_op; plug->nas_ops = nas_op; rc = LSM_ERR_OK; } return rc; } int lsm_register_plugin_v1_2(lsm_plugin_ptr plug, void *private_data, struct lsm_mgmt_ops_v1 *mgm_op, struct lsm_san_ops_v1 *san_op, struct lsm_fs_ops_v1 *fs_op, struct lsm_nas_ops_v1 *nas_op, struct lsm_ops_v1_2 *ops_v1_2) { int rc = lsm_register_plugin_v1(plug, private_data, mgm_op, san_op, fs_op, nas_op); if (rc != LSM_ERR_OK) 
{ return rc; } plug->ops_v1_2 = ops_v1_2; return rc; } void *lsm_private_data_get(lsm_plugin_ptr plug) { if (!LSM_IS_PLUGIN(plug)) { return NULL; } return plug->private_data; } static void lsm_plugin_free(lsm_plugin_ptr p, lsm_flag flags) { if (LSM_IS_PLUGIN(p)) { delete(p->tp); p->tp = NULL; if (p->unreg) { p->unreg(p, flags); } free(p->desc); p->desc = NULL; free(p->version); p->version = NULL; lsm_error_free(p->error); p->error = NULL; p->magic = LSM_DEL_MAGIC(LSM_PLUGIN_MAGIC); free(p); } } static lsm_plugin_ptr lsm_plugin_alloc(lsm_plugin_register reg, lsm_plugin_unregister unreg, const char *desc, const char *version) { if (!reg || !unreg) { return NULL; } lsm_plugin_ptr rc = (lsm_plugin_ptr) calloc(1, sizeof(lsm_plugin)); if (rc) { rc->magic = LSM_PLUGIN_MAGIC; rc->reg = reg; rc->unreg = unreg; rc->desc = strdup(desc); rc->version = strdup(version); if (!rc->desc || !rc->version) { lsm_plugin_free(rc, LSM_CLIENT_FLAG_RSVD); rc = NULL; } } return rc; } static void error_send(lsm_plugin_ptr p, int error_code) { if (!LSM_IS_PLUGIN(p)) { return; } if (p->error) { if (p->tp) { p->tp->errorSend(p->error->code, ss(p->error->message), ss(p->error->debug)); lsm_error_free(p->error); p->error = NULL; } } else { p->tp->errorSend(error_code, "Plugin didn't provide error message", ""); } } static int get_search_params(Value & params, char **k, char **v) { int rc = LSM_ERR_OK; Value key = params["search_key"]; Value val = params["search_value"]; if (Value::string_t == key.valueType()) { if (Value::string_t == val.valueType()) { *k = strdup(key.asC_str()); *v = strdup(val.asC_str()); if (*k == NULL || *v == NULL) { free(*k); *k = NULL; free(*v); *v = NULL; rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } else if (Value::null_t != key.valueType()) { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } return rc; } /** * Checks to see if a character string is an integer and returns result * @param[in] sn Character array holding the integer * @param[out] num The 
numeric value contained in string * @return true if sn is an integer, else false */ static bool get_num(char *sn, int &num) { errno = 0; num = strtol(sn, NULL, 10); if (!errno) { return true; } return false; } int lsm_plugin_init_v1(int argc, char *argv[], lsm_plugin_register reg, lsm_plugin_unregister unreg, const char *desc, const char *version) { int rc = 1; lsm_plugin_ptr plug = NULL; if (NULL == desc || NULL == version) { return LSM_ERR_INVALID_ARGUMENT; } int sd = 0; if (argc == 2 && get_num(argv[1], sd)) { plug = lsm_plugin_alloc(reg, unreg, desc, version); if (plug) { plug->tp = new Ipc(sd); if (plug->tp) { rc = lsm_plugin_run(plug); } else { lsm_plugin_free(plug, LSM_CLIENT_FLAG_RSVD); rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_NO_MEMORY; } } else { //Process command line arguments or display help text. rc = 2; } return rc; } typedef int (*handler) (lsm_plugin_ptr p, Value & params, Value & response); static int handle_unregister(lsm_plugin_ptr p, Value & params, Value & response) { /* This is handled in the event loop */ return LSM_ERR_OK; } static int handle_register(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; std::string uri_string; std::string password; if (p && p->reg) { Value uri_v = params["uri"]; Value passwd_v = params["password"]; Value tmo_v = params["timeout"]; if (Value::string_t == uri_v.valueType() && (Value::string_t == passwd_v.valueType() || Value::null_t == passwd_v.valueType()) && Value::numeric_t == tmo_v.valueType()) { lsm_flag flags = LSM_FLAG_GET_VALUE(params); uri_string = uri_v.asString(); if (Value::string_t == params["password"].valueType()) { password = params["password"].asString(); } //Let the plug-in initialize itself. 
rc = p->reg(p, uri_string.c_str(), password.c_str(), tmo_v.asUint32_t(), flags); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } else { rc = LSM_ERR_NO_SUPPORT; } return rc; } static int handle_set_time_out(lsm_plugin_ptr p, Value & params, Value & response) { if (p && p->mgmt_ops && p->mgmt_ops->tmo_set) { if (Value::numeric_t == params["ms"].valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { return p->mgmt_ops->tmo_set(p, params["ms"].asUint32_t(), LSM_FLAG_GET_VALUE(params)); } else { return LSM_ERR_TRANSPORT_INVALID_ARG; } } return LSM_ERR_NO_SUPPORT; } static int handle_get_time_out(lsm_plugin_ptr p, Value & params, Value & response) { uint32_t tmo = 0; int rc = LSM_ERR_NO_SUPPORT; if (p && p->mgmt_ops && p->mgmt_ops->tmo_get) { if (LSM_FLAG_EXPECTED_TYPE(params)) { rc = p->mgmt_ops->tmo_get(p, &tmo, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { response = Value(tmo); } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int handle_job_status(lsm_plugin_ptr p, Value & params, Value & response) { std::string job_id; lsm_job_status status; uint8_t percent; lsm_data_type t = LSM_DATA_TYPE_UNKNOWN; void *value = NULL; int rc = LSM_ERR_NO_SUPPORT; if (p && p->mgmt_ops && p->mgmt_ops->job_status) { if (Value::string_t != params["job_id"].valueType() && !LSM_FLAG_EXPECTED_TYPE(params)) { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } else { job_id = params["job_id"].asString(); rc = p->mgmt_ops->job_status(p, job_id.c_str(), &status, &percent, &t, &value, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { std::vector < Value > result; result.push_back(Value((int32_t) status)); result.push_back(Value(percent)); if (NULL == value) { result.push_back(Value()); } else { if (LSM_DATA_TYPE_VOLUME == t && LSM_IS_VOL((lsm_volume *) value)) { result.push_back(volume_to_value((lsm_volume *) value)); lsm_volume_record_free((lsm_volume *) value); } else if (LSM_DATA_TYPE_FS == t && LSM_IS_FS((lsm_fs *) value)) { result.push_back(fs_to_value((lsm_fs *) value)); 
lsm_fs_record_free((lsm_fs *) value); } else if (LSM_DATA_TYPE_SS == t && LSM_IS_SS((lsm_fs_ss *) value)) { result.push_back(ss_to_value((lsm_fs_ss *) value)); lsm_fs_ss_record_free((lsm_fs_ss *) value); } else if (LSM_DATA_TYPE_POOL == t && LSM_IS_POOL((lsm_pool *) value)) { result.push_back(pool_to_value((lsm_pool *) value)); lsm_pool_record_free((lsm_pool *) value); } else { rc = LSM_ERR_PLUGIN_BUG; } } response = Value(result); } } } return rc; } static int handle_plugin_info(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p) { std::vector < Value > result; result.push_back(Value(p->desc)); result.push_back(Value(p->version)); response = Value(result); rc = LSM_ERR_OK; } return rc; } static int handle_job_free(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->mgmt_ops && p->mgmt_ops->job_free) { if (Value::string_t == params["job_id"].valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { std::string job_num = params["job_id"].asString(); char *j = (char *) job_num.c_str(); rc = p->mgmt_ops->job_free(p, j, LSM_FLAG_GET_VALUE(params)); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int handle_system_list(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->mgmt_ops && p->mgmt_ops->system_list) { lsm_system **systems; uint32_t count = 0; if (LSM_FLAG_EXPECTED_TYPE(params)) { rc = p->mgmt_ops->system_list(p, &systems, &count, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { std::vector < Value > result; result.reserve(count); for (uint32_t i = 0; i < count; ++i) { result.push_back(system_to_value(systems[i])); } lsm_system_record_array_free(systems, count); systems = NULL; response = Value(result); } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int handle_pools(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; char *key = NULL; char *val = NULL; if (p && 
p->mgmt_ops && p->mgmt_ops->pool_list) { lsm_pool **pools = NULL; uint32_t count = 0; if (LSM_FLAG_EXPECTED_TYPE(params) && ((rc = get_search_params(params, &key, &val)) == LSM_ERR_OK)) { rc = p->mgmt_ops->pool_list(p, key, val, &pools, &count, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { std::vector < Value > result; result.reserve(count); for (uint32_t i = 0; i < count; ++i) { result.push_back(pool_to_value(pools[i])); } lsm_pool_record_array_free(pools, count); pools = NULL; response = Value(result); } free(key); free(val); } else { if (rc == LSM_ERR_NO_SUPPORT) { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } } return rc; } static int handle_target_ports(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; char *key = NULL; char *val = NULL; if (p && p->san_ops && p->san_ops->target_port_list) { lsm_target_port **target_ports = NULL; uint32_t count = 0; if (LSM_FLAG_EXPECTED_TYPE(params) && ((rc = get_search_params(params, &key, &val)) == LSM_ERR_OK)) { rc = p->san_ops->target_port_list(p, key, val, &target_ports, &count, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { std::vector < Value > result; result.reserve(count); for (uint32_t i = 0; i < count; ++i) { result.push_back(target_port_to_value(target_ports[i])); } lsm_target_port_record_array_free(target_ports, count); target_ports = NULL; response = Value(result); } free(key); free(val); } else { if (rc == LSM_ERR_NO_SUPPORT) { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } } return rc; } static int capabilities(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->mgmt_ops && p->mgmt_ops->capablities) { lsm_storage_capabilities *c = NULL; Value v_s = params["system"]; if (IS_CLASS_SYSTEM(v_s) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_system *sys = value_to_system(v_s); if (sys) { rc = p->mgmt_ops->capablities(p, sys, &c, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { response = capabilities_to_value(c); 
lsm_capability_record_free(c); c = NULL; } lsm_system_record_free(sys); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static void get_volumes(int rc, lsm_volume ** vols, uint32_t count, Value & response) { if (LSM_ERR_OK == rc) { std::vector < Value > result; result.reserve(count); for (uint32_t i = 0; i < count; ++i) { result.push_back(volume_to_value(vols[i])); } lsm_volume_record_array_free(vols, count); vols = NULL; response = Value(result); } } static int handle_volumes(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; char *key = NULL; char *val = NULL; if (p && p->san_ops && p->san_ops->vol_get) { lsm_volume **vols = NULL; uint32_t count = 0; if (LSM_FLAG_EXPECTED_TYPE(params) && (rc = get_search_params(params, &key, &val)) == LSM_ERR_OK) { rc = p->san_ops->vol_get(p, key, val, &vols, &count, LSM_FLAG_GET_VALUE(params)); get_volumes(rc, vols, count, response); free(key); free(val); } else { if (rc == LSM_ERR_NO_SUPPORT) { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } } return rc; } static void get_disks(int rc, lsm_disk ** disks, uint32_t count, Value & response) { if (LSM_ERR_OK == rc) { std::vector < Value > result; result.reserve(count); for (uint32_t i = 0; i < count; ++i) { result.push_back(disk_to_value(disks[i])); } lsm_disk_record_array_free(disks, count); disks = NULL; response = Value(result); } } static int handle_disks(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; char *key = NULL; char *val = NULL; if (p && p->san_ops && p->san_ops->disk_get) { lsm_disk **disks = NULL; uint32_t count = 0; if (LSM_FLAG_EXPECTED_TYPE(params) && (rc = get_search_params(params, &key, &val)) == LSM_ERR_OK) { rc = p->san_ops->disk_get(p, key, val, &disks, &count, LSM_FLAG_GET_VALUE(params)); get_disks(rc, disks, count, response); free(key); free(val); } else { if (rc == LSM_ERR_NO_SUPPORT) { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } } return rc; } static 
int handle_volume_create(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->vol_create) { Value v_p = params["pool"]; Value v_name = params["volume_name"]; Value v_size = params["size_bytes"]; Value v_prov = params["provisioning"]; if (IS_CLASS_POOL(v_p) && Value::string_t == v_name.valueType() && Value::numeric_t == v_size.valueType() && Value::numeric_t == v_prov.valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_pool *pool = value_to_pool(v_p); if (pool) { lsm_volume *vol = NULL; char *job = NULL; const char *name = v_name.asC_str(); uint64_t size = v_size.asUint64_t(); lsm_volume_provision_type pro = (lsm_volume_provision_type) v_prov.asInt32_t(); rc = p->san_ops->vol_create(p, pool, name, size, pro, &vol, &job, LSM_FLAG_GET_VALUE(params)); Value v = volume_to_value(vol); response = job_handle(v, job); //Free dynamic data. lsm_pool_record_free(pool); lsm_volume_record_free(vol); free(job); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int handle_volume_resize(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->vol_resize) { Value v_vol = params["volume"]; Value v_size = params["new_size_bytes"]; if (IS_CLASS_VOLUME(v_vol) && Value::numeric_t == v_size.valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_volume *vol = value_to_volume(v_vol); if (vol) { lsm_volume *resized_vol = NULL; uint64_t size = v_size.asUint64_t(); char *job = NULL; rc = p->san_ops->vol_resize(p, vol, size, &resized_vol, &job, LSM_FLAG_GET_VALUE(params)); Value v = volume_to_value(resized_vol); response = job_handle(v, job); lsm_volume_record_free(vol); lsm_volume_record_free(resized_vol); free(job); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int handle_volume_replicate(lsm_plugin_ptr p, Value & params, Value & response) { int rc = 
LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->vol_replicate) { Value v_pool = params["pool"]; Value v_vol_src = params["volume_src"]; Value v_rep = params["rep_type"]; Value v_name = params["name"]; if (((Value::object_t == v_pool.valueType() && IS_CLASS_POOL(v_pool)) || Value::null_t == v_pool.valueType()) && IS_CLASS_VOLUME(v_vol_src) && Value::numeric_t == v_rep.valueType() && Value::string_t == v_name.valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_pool *pool = (Value::null_t == v_pool.valueType())? NULL : value_to_pool(v_pool); lsm_volume *vol = value_to_volume(v_vol_src); lsm_volume *newVolume = NULL; lsm_replication_type rep = (lsm_replication_type) v_rep.asInt32_t(); const char *name = v_name.asC_str(); char *job = NULL; if (vol && (pool || (!pool && Value::null_t == v_pool.valueType()))) { rc = p->san_ops->vol_replicate(p, pool, rep, vol, name, &newVolume, &job, LSM_FLAG_GET_VALUE(params)); Value v = volume_to_value(newVolume); response = job_handle(v, job); lsm_volume_record_free(newVolume); free(job); } else { rc = LSM_ERR_NO_MEMORY; } lsm_pool_record_free(pool); lsm_volume_record_free(vol); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int handle_volume_replicate_range_block_size(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; uint32_t block_size = 0; if (p && p->san_ops && p->san_ops->vol_rep_range_bs) { Value v_s = params["system"]; if (IS_CLASS_SYSTEM(v_s) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_system *sys = value_to_system(v_s); if (sys) { rc = p->san_ops->vol_rep_range_bs(p, sys, &block_size, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { response = Value(block_size); } lsm_system_record_free(sys); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int handle_volume_replicate_range(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; uint32_t range_count = 0; char *job = NULL; if (p && 
p->san_ops && p->san_ops->vol_rep_range) { Value v_rep = params["rep_type"]; Value v_vol_src = params["volume_src"]; Value v_vol_dest = params["volume_dest"]; Value v_ranges = params["ranges"]; if (Value::numeric_t == v_rep.valueType() && IS_CLASS_VOLUME(v_vol_src) && IS_CLASS_VOLUME(v_vol_dest) && Value::array_t == v_ranges.valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_replication_type repType = (lsm_replication_type) v_rep.asInt32_t(); lsm_volume *source = value_to_volume(v_vol_src); lsm_volume *dest = value_to_volume(v_vol_dest); lsm_block_range **ranges = value_to_block_range_list(v_ranges, &range_count); if (source && dest && ranges) { rc = p->san_ops->vol_rep_range(p, repType, source, dest, ranges, range_count, &job, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_JOB_STARTED == rc) { response = Value(job); free(job); job = NULL; } } else { rc = LSM_ERR_NO_MEMORY; } lsm_volume_record_free(source); lsm_volume_record_free(dest); lsm_block_range_record_array_free(ranges, range_count); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int handle_volume_delete(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->vol_delete) { Value v_vol = params["volume"]; if (IS_CLASS_VOLUME(v_vol) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_volume *vol = value_to_volume(v_vol); if (vol) { char *job = NULL; rc = p->san_ops->vol_delete(p, vol, &job, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_JOB_STARTED == rc) { response = Value(job); } lsm_volume_record_free(vol); free(job); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int handle_vol_enable_disable(lsm_plugin_ptr p, Value & params, Value & response, int online) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && ((online) ? 
p->san_ops->vol_enable : p->san_ops->vol_disable)) { Value v_vol = params["volume"]; if (IS_CLASS_VOLUME(v_vol) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_volume *vol = value_to_volume(v_vol); if (vol) { if (online) { rc = p->san_ops->vol_enable(p, vol, LSM_FLAG_GET_VALUE(params)); } else { rc = p->san_ops->vol_disable(p, vol, LSM_FLAG_GET_VALUE(params)); } lsm_volume_record_free(vol); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int handle_volume_enable(lsm_plugin_ptr p, Value & params, Value & response) { return handle_vol_enable_disable(p, params, response, 1); } static int handle_volume_disable(lsm_plugin_ptr p, Value & params, Value & response) { return handle_vol_enable_disable(p, params, response, 0); } static int handle_volume_raid_info(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->ops_v1_2 && p->ops_v1_2->vol_raid_info) { Value v_vol = params["volume"]; if (IS_CLASS_VOLUME(v_vol) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_volume *vol = value_to_volume(v_vol); std::vector < Value > result; if (vol) { lsm_volume_raid_type raid_type; uint32_t strip_size; uint32_t disk_count; uint32_t min_io_size; uint32_t opt_io_size; rc = p->ops_v1_2->vol_raid_info(p, vol, &raid_type, &strip_size, &disk_count, &min_io_size, &opt_io_size, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { result.push_back(Value((int32_t) raid_type)); result.push_back(Value(strip_size)); result.push_back(Value(disk_count)); result.push_back(Value(min_io_size)); result.push_back(Value(opt_io_size)); response = Value(result); } lsm_volume_record_free(vol); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int handle_pool_member_info(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->ops_v1_2 && p->ops_v1_2->pool_member_info) { Value v_pool = params["pool"]; if (IS_CLASS_POOL(v_pool) && 
LSM_FLAG_EXPECTED_TYPE(params)) { lsm_pool *pool = value_to_pool(v_pool); std::vector < Value > result; if (pool) { lsm_volume_raid_type raid_type = LSM_VOLUME_RAID_TYPE_UNKNOWN; lsm_pool_member_type member_type = LSM_POOL_MEMBER_TYPE_UNKNOWN; lsm_string_list *member_ids = NULL; rc = p->ops_v1_2->pool_member_info(p, pool, &raid_type, &member_type, &member_ids, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { result.push_back(Value((int32_t) raid_type)); result.push_back(Value((int32_t) member_type)); result.push_back(string_list_to_value(member_ids)); if (member_ids != NULL) { lsm_string_list_free(member_ids); } response = Value(result); } lsm_pool_record_free(pool); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int ag_list(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; char *key = NULL; char *val = NULL; if (p && p->san_ops && p->san_ops->ag_list) { if (LSM_FLAG_EXPECTED_TYPE(params) && (rc = get_search_params(params, &key, &val)) == LSM_ERR_OK) { lsm_access_group **groups = NULL; uint32_t count; rc = p->san_ops->ag_list(p, key, val, &groups, &count, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { response = access_group_list_to_value(groups, count); /* Free the memory */ lsm_access_group_record_array_free(groups, count); } free(key); free(val); } else { if (rc == LSM_ERR_NO_SUPPORT) { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } } return rc; } static int ag_create(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->ag_create) { Value v_name = params["name"]; Value v_init_id = params["init_id"]; Value v_init_type = params["init_type"]; Value v_system = params["system"]; if (Value::string_t == v_name.valueType() && Value::string_t == v_init_id.valueType() && Value::numeric_t == v_init_type.valueType() && IS_CLASS_SYSTEM(v_system) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_access_group *ag = NULL; 
lsm_system *system = value_to_system(v_system); if (system) { rc = p->san_ops->ag_create(p, v_name.asC_str(), v_init_id.asC_str(), (lsm_access_group_init_type) v_init_type.asInt32_t(), system, &ag, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { response = access_group_to_value(ag); lsm_access_group_record_free(ag); } lsm_system_record_free(system); system = NULL; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int ag_delete(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->ag_delete) { Value v_access_group = params["access_group"]; if (IS_CLASS_ACCESS_GROUP(v_access_group) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_access_group *ag = value_to_access_group(v_access_group); if (ag) { rc = p->san_ops->ag_delete(p, ag, LSM_FLAG_GET_VALUE(params)); lsm_access_group_record_free(ag); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int ag_initiator_add(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->ag_add_initiator) { Value v_group = params["access_group"]; Value v_init_id = params["init_id"]; Value v_init_type = params["init_type"]; if (IS_CLASS_ACCESS_GROUP(v_group) && Value::string_t == v_init_id.valueType() && Value::numeric_t == v_init_type.valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_access_group *ag = value_to_access_group(v_group); if (ag) { lsm_access_group *updated_access_group = NULL; const char *id = v_init_id.asC_str(); lsm_access_group_init_type id_type = (lsm_access_group_init_type) v_init_type.asInt32_t(); rc = p->san_ops->ag_add_initiator(p, ag, id, id_type, &updated_access_group, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { response = access_group_to_value(updated_access_group); lsm_access_group_record_free(updated_access_group); } lsm_access_group_record_free(ag); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = 
LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int ag_initiator_del(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->ag_del_initiator) { Value v_group = params["access_group"]; Value v_init_id = params["init_id"]; Value v_init_type = params["init_type"]; if (IS_CLASS_ACCESS_GROUP(v_group) && Value::string_t == v_init_id.valueType() && Value::numeric_t == v_init_type.valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_access_group *ag = value_to_access_group(v_group); if (ag) { lsm_access_group *updated_access_group = NULL; const char *id = v_init_id.asC_str(); lsm_access_group_init_type id_type = (lsm_access_group_init_type) v_init_type.asInt32_t(); rc = p->san_ops->ag_del_initiator(p, ag, id, id_type, &updated_access_group, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { response = access_group_to_value(updated_access_group); lsm_access_group_record_free(updated_access_group); } lsm_access_group_record_free(ag); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int volume_mask(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->ag_grant) { Value v_group = params["access_group"]; Value v_vol = params["volume"]; if (IS_CLASS_ACCESS_GROUP(v_group) && IS_CLASS_VOLUME(v_vol) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_access_group *ag = value_to_access_group(v_group); lsm_volume *vol = value_to_volume(v_vol); if (ag && vol) { rc = p->san_ops->ag_grant(p, ag, vol, LSM_FLAG_GET_VALUE(params)); } else { rc = LSM_ERR_NO_MEMORY; } lsm_access_group_record_free(ag); lsm_volume_record_free(vol); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int volume_unmask(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->ag_revoke) { Value v_group = params["access_group"]; Value v_vol = params["volume"]; 
if (IS_CLASS_ACCESS_GROUP(v_group) && IS_CLASS_VOLUME(v_vol) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_access_group *ag = value_to_access_group(v_group); lsm_volume *vol = value_to_volume(v_vol); if (ag && vol) { rc = p->san_ops->ag_revoke(p, ag, vol, LSM_FLAG_GET_VALUE(params)); } else { rc = LSM_ERR_NO_MEMORY; } lsm_access_group_record_free(ag); lsm_volume_record_free(vol); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int vol_accessible_by_ag(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->vol_accessible_by_ag) { Value v_access_group = params["access_group"]; if (IS_CLASS_ACCESS_GROUP(v_access_group) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_access_group *ag = value_to_access_group(v_access_group); if (ag) { lsm_volume **vols = NULL; uint32_t count = 0; rc = p->san_ops->vol_accessible_by_ag(p, ag, &vols, &count, LSM_FLAG_GET_VALUE (params)); if (LSM_ERR_OK == rc) { std::vector < Value > result; result.reserve(count); for (uint32_t i = 0; i < count; ++i) { result.push_back(volume_to_value(vols[i])); } response = Value(result); } lsm_access_group_record_free(ag); lsm_volume_record_array_free(vols, count); vols = NULL; } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int ag_granted_to_volume(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->ag_granted_to_vol) { Value v_vol = params["volume"]; if (IS_CLASS_VOLUME(v_vol) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_volume *volume = value_to_volume(v_vol); if (volume) { lsm_access_group **groups = NULL; uint32_t count = 0; rc = p->san_ops->ag_granted_to_vol(p, volume, &groups, &count, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { std::vector < Value > result; result.reserve(count); for (uint32_t i = 0; i < count; ++i) { result.push_back(access_group_to_value(groups[i])); } response = Value(result); } 
lsm_volume_record_free(volume); lsm_access_group_record_array_free(groups, count); groups = NULL; } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int volume_dependency(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->vol_child_depends) { Value v_vol = params["volume"]; if (IS_CLASS_VOLUME(v_vol) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_volume *volume = value_to_volume(v_vol); if (volume) { uint8_t yes; rc = p->san_ops->vol_child_depends(p, volume, &yes, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { response = Value((bool) (yes)); } lsm_volume_record_free(volume); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int volume_dependency_rm(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->vol_child_depends_rm) { Value v_vol = params["volume"]; if (IS_CLASS_VOLUME(v_vol) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_volume *volume = value_to_volume(v_vol); if (volume) { char *job = NULL; rc = p->san_ops->vol_child_depends_rm(p, volume, &job, LSM_FLAG_GET_VALUE (params)); if (LSM_ERR_JOB_STARTED == rc) { response = Value(job); free(job); } lsm_volume_record_free(volume); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int fs(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; char *key = NULL; char *val = NULL; if (p && p->fs_ops && p->fs_ops->fs_list) { if (LSM_FLAG_EXPECTED_TYPE(params) && ((rc = get_search_params(params, &key, &val)) == LSM_ERR_OK)) { lsm_fs **fs = NULL; uint32_t count = 0; rc = p->fs_ops->fs_list(p, key, val, &fs, &count, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { std::vector < Value > result; result.reserve(count); for (uint32_t i = 0; i < count; ++i) { result.push_back(fs_to_value(fs[i])); } response = 
Value(result); lsm_fs_record_array_free(fs, count); fs = NULL; } free(key); free(val); } else { if (rc == LSM_ERR_NO_SUPPORT) { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } } return rc; } static int fs_create(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->fs_ops && p->fs_ops->fs_create) { Value v_pool = params["pool"]; Value v_name = params["name"]; Value v_size = params["size_bytes"]; if (IS_CLASS_POOL(v_pool) && Value::string_t == v_name.valueType() && Value::numeric_t == v_size.valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_pool *pool = value_to_pool(v_pool); if (pool) { const char *name = params["name"].asC_str(); uint64_t size_bytes = params["size_bytes"].asUint64_t(); lsm_fs *fs = NULL; char *job = NULL; rc = p->fs_ops->fs_create(p, pool, name, size_bytes, &fs, &job, LSM_FLAG_GET_VALUE(params)); std::vector < Value > r; if (LSM_ERR_OK == rc) { r.push_back(Value()); r.push_back(fs_to_value(fs)); response = Value(r); lsm_fs_record_free(fs); } else if (LSM_ERR_JOB_STARTED == rc) { r.push_back(Value(job)); r.push_back(Value()); response = Value(r); free(job); } lsm_pool_record_free(pool); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int fs_delete(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->fs_ops && p->fs_ops->fs_delete) { Value v_fs = params["fs"]; if (IS_CLASS_FILE_SYSTEM(v_fs) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_fs *fs = value_to_fs(v_fs); if (fs) { char *job = NULL; rc = p->fs_ops->fs_delete(p, fs, &job, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_JOB_STARTED == rc) { response = Value(job); free(job); } lsm_fs_record_free(fs); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int fs_resize(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->fs_ops && p->fs_ops->fs_resize) { Value v_fs = params["fs"]; Value 
v_size = params["new_size_bytes"]; if (IS_CLASS_FILE_SYSTEM(v_fs) && Value::numeric_t == v_size.valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_fs *fs = value_to_fs(v_fs); if (fs) { uint64_t size_bytes = v_size.asUint64_t(); lsm_fs *rfs = NULL; char *job = NULL; rc = p->fs_ops->fs_resize(p, fs, size_bytes, &rfs, &job, LSM_FLAG_GET_VALUE(params)); std::vector < Value > r; if (LSM_ERR_OK == rc) { r.push_back(Value()); r.push_back(fs_to_value(rfs)); response = Value(r); lsm_fs_record_free(rfs); } else if (LSM_ERR_JOB_STARTED == rc) { r.push_back(Value(job)); r.push_back(Value()); response = Value(r); free(job); } lsm_fs_record_free(fs); } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int fs_clone(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->fs_ops && p->fs_ops->fs_clone) { Value v_src_fs = params["src_fs"]; Value v_name = params["dest_fs_name"]; Value v_ss = params["snapshot"]; /* This is optional */ if (IS_CLASS_FILE_SYSTEM(v_src_fs) && Value::string_t == v_name.valueType() && (Value::null_t == v_ss.valueType() || Value::object_t == v_ss.valueType()) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_fs *clonedFs = NULL; char *job = NULL; lsm_fs *fs = value_to_fs(v_src_fs); const char *name = v_name.asC_str(); lsm_fs_ss *ss = (Value::null_t == v_ss.valueType())? 
NULL : value_to_ss(v_ss); if (fs && ((ss && v_ss.valueType() == Value::object_t) || (!ss && v_ss.valueType() == Value::null_t))) { rc = p->fs_ops->fs_clone(p, fs, name, &clonedFs, ss, &job, LSM_FLAG_GET_VALUE(params)); std::vector < Value > r; if (LSM_ERR_OK == rc) { r.push_back(Value()); r.push_back(fs_to_value(clonedFs)); response = Value(r); lsm_fs_record_free(clonedFs); } else if (LSM_ERR_JOB_STARTED == rc) { r.push_back(Value(job)); r.push_back(Value()); response = Value(r); free(job); } } else { rc = LSM_ERR_NO_MEMORY; } lsm_fs_record_free(fs); lsm_fs_ss_record_free(ss); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int fs_file_clone(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_OK; if (p && p->fs_ops && p->fs_ops->fs_file_clone) { Value v_fs = params["fs"]; Value v_src_name = params["src_file_name"]; Value v_dest_name = params["dest_file_name"]; Value v_ss = params["snapshot"]; /* This is optional */ if (IS_CLASS_FILE_SYSTEM(v_fs) && Value::string_t == v_src_name.valueType() && Value::string_t == v_dest_name.valueType() && (Value::null_t == v_ss.valueType() || Value::object_t == v_ss.valueType()) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_fs *fs = value_to_fs(v_fs); lsm_fs_ss *ss = (Value::null_t == v_ss.valueType())? 
NULL : value_to_ss(v_ss); if (fs && ((ss && v_ss.valueType() == Value::object_t) || (!ss && v_ss.valueType() == Value::null_t))) { const char *src = v_src_name.asC_str(); const char *dest = v_dest_name.asC_str(); char *job = NULL; rc = p->fs_ops->fs_file_clone(p, fs, src, dest, ss, &job, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_JOB_STARTED == rc) { response = Value(job); free(job); } } else { rc = LSM_ERR_NO_MEMORY; } lsm_fs_record_free(fs); lsm_fs_ss_record_free(ss); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int fs_child_dependency(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->fs_ops && p->fs_ops->fs_child_dependency) { Value v_fs = params["fs"]; Value v_files = params["files"]; if (IS_CLASS_FILE_SYSTEM(v_fs) && (Value::array_t == v_files.valueType() || Value::null_t == v_files.valueType()) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_fs *fs = value_to_fs(v_fs); lsm_string_list *files = (Value::null_t == v_files.valueType())? NULL : value_to_string_list(v_files); if (fs && (files || (!files && Value::null_t == v_files.valueType()))) { uint8_t yes = 0; rc = p->fs_ops->fs_child_dependency(p, fs, files, &yes); if (LSM_ERR_OK == rc) { response = Value((bool) yes); } } else { rc = LSM_ERR_NO_MEMORY; } lsm_fs_record_free(fs); lsm_string_list_free(files); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int fs_child_dependency_rm(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->fs_ops && p->fs_ops->fs_child_dependency_rm) { Value v_fs = params["fs"]; Value v_files = params["files"]; if (IS_CLASS_FILE_SYSTEM(v_fs) && (Value::array_t == v_files.valueType() || Value::null_t == v_files.valueType()) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_fs *fs = value_to_fs(v_fs); lsm_string_list *files = (Value::null_t == v_files.valueType())? 
NULL : value_to_string_list(v_files); if (fs && (files || (!files && Value::null_t == v_files.valueType()))) { char *job = NULL; rc = p->fs_ops->fs_child_dependency_rm(p, fs, files, &job, LSM_FLAG_GET_VALUE (params)); if (LSM_ERR_JOB_STARTED == rc) { response = Value(job); free(job); } } else { rc = LSM_ERR_NO_MEMORY; } lsm_fs_record_free(fs); lsm_string_list_free(files); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int ss_list(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->fs_ops && p->fs_ops->fs_ss_list) { Value v_fs = params["fs"]; if (IS_CLASS_FILE_SYSTEM(v_fs) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_fs *fs = value_to_fs(v_fs); if (fs) { lsm_fs_ss **ss = NULL; uint32_t count = 0; rc = p->fs_ops->fs_ss_list(p, fs, &ss, &count, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { std::vector < Value > result; result.reserve(count); for (uint32_t i = 0; i < count; ++i) { result.push_back(ss_to_value(ss[i])); } response = Value(result); lsm_fs_record_free(fs); fs = NULL; lsm_fs_ss_record_array_free(ss, count); ss = NULL; } } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int ss_create(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->fs_ops && p->fs_ops->fs_ss_create) { Value v_fs = params["fs"]; Value v_ss_name = params["snapshot_name"]; if (IS_CLASS_FILE_SYSTEM(v_fs) && Value::string_t == v_ss_name.valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_fs *fs = value_to_fs(v_fs); if (fs) { lsm_fs_ss *ss = NULL; char *job = NULL; const char *name = v_ss_name.asC_str(); rc = p->fs_ops->fs_ss_create(p, fs, name, &ss, &job, LSM_FLAG_GET_VALUE(params)); std::vector < Value > r; if (LSM_ERR_OK == rc) { r.push_back(Value()); r.push_back(ss_to_value(ss)); response = Value(r); lsm_fs_ss_record_free(ss); } else if (LSM_ERR_JOB_STARTED == rc) { r.push_back(Value(job)); r.push_back(Value()); response = Value(r); free(job); } } else { rc = 
LSM_ERR_NO_MEMORY; } lsm_fs_record_free(fs); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int ss_delete(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->fs_ops && p->fs_ops->fs_ss_delete) { Value v_fs = params["fs"]; Value v_ss = params["snapshot"]; if (IS_CLASS_FILE_SYSTEM(v_fs) && IS_CLASS_FS_SNAPSHOT(v_ss) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_fs *fs = value_to_fs(v_fs); lsm_fs_ss *ss = value_to_ss(v_ss); if (fs && ss) { char *job = NULL; rc = p->fs_ops->fs_ss_delete(p, fs, ss, &job, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_JOB_STARTED == rc) { response = Value(job); free(job); } } else { rc = LSM_ERR_NO_MEMORY; } lsm_fs_record_free(fs); lsm_fs_ss_record_free(ss); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int ss_restore(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->fs_ops && p->fs_ops->fs_ss_restore) { Value v_fs = params["fs"]; Value v_ss = params["snapshot"]; Value v_files = params["files"]; Value v_restore_files = params["restore_files"]; Value v_all_files = params["all_files"]; if (IS_CLASS_FILE_SYSTEM(v_fs) && IS_CLASS_FS_SNAPSHOT(v_ss) && (Value::array_t == v_files.valueType() || Value::null_t == v_files.valueType()) && (Value::array_t == v_restore_files.valueType() || Value::null_t == v_restore_files.valueType()) && Value::boolean_t == v_all_files.valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { char *job = NULL; lsm_fs *fs = value_to_fs(v_fs); lsm_fs_ss *ss = value_to_ss(v_ss); lsm_string_list *files = (Value::null_t == v_files.valueType())? NULL : value_to_string_list(v_files); lsm_string_list *restore_files = (Value::null_t == v_restore_files.valueType())? NULL : value_to_string_list(v_restore_files); int all_files = (v_all_files.asBool())? 
1 : 0; if (fs && ss && (files || (!files && Value::null_t == v_files.valueType())) && (restore_files || (!restore_files && Value::null_t == v_restore_files.valueType()))) { rc = p->fs_ops->fs_ss_restore(p, fs, ss, files, restore_files, all_files, &job, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_JOB_STARTED == rc) { response = Value(job); free(job); } } else { rc = LSM_ERR_NO_MEMORY; } lsm_fs_record_free(fs); lsm_fs_ss_record_free(ss); lsm_string_list_free(files); lsm_string_list_free(restore_files); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int export_auth(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->nas_ops && p->nas_ops->nfs_auth_types) { lsm_string_list *types = NULL; if (LSM_FLAG_EXPECTED_TYPE(params)) { rc = p->nas_ops->nfs_auth_types(p, &types, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { response = string_list_to_value(types); lsm_string_list_free(types); } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int exports(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; char *key = NULL; char *val = NULL; if (p && p->nas_ops && p->nas_ops->nfs_list) { lsm_nfs_export **exports = NULL; uint32_t count = 0; if (LSM_FLAG_EXPECTED_TYPE(params) && (rc = get_search_params(params, &key, &val)) == LSM_ERR_OK) { rc = p->nas_ops->nfs_list(p, key, val, &exports, &count, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { std::vector < Value > result; result.reserve(count); for (uint32_t i = 0; i < count; ++i) { result.push_back(nfs_export_to_value(exports[i])); } response = Value(result); lsm_nfs_export_record_array_free(exports, count); exports = NULL; count = 0; } free(key); free(val); } else { if (rc == LSM_ERR_NO_SUPPORT) { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } } return rc; } static int64_t get_uid_gid(Value & id) { if (Value::null_t == id.valueType()) { return ANON_UID_GID_NA; } else { return id.asInt64_t(); } } static int 
export_fs(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->nas_ops && p->nas_ops->nfs_export) { Value v_fs_id = params["fs_id"]; Value v_export_path = params["export_path"]; Value v_root_list = params["root_list"]; Value v_rw_list = params["rw_list"]; Value v_ro_list = params["ro_list"]; Value v_auth_type = params["auth_type"]; Value v_options = params["options"]; Value v_anon_uid = params["anon_uid"]; Value v_anon_gid = params["anon_gid"]; if (Value::string_t == v_fs_id.valueType() && (Value::string_t == v_export_path.valueType() || Value::null_t == v_export_path.valueType()) && Value::array_t == v_root_list.valueType() && Value::array_t == v_rw_list.valueType() && Value::array_t == v_ro_list.valueType() && (Value::string_t == v_auth_type.valueType() || Value::null_t == v_auth_type.valueType()) && (Value::string_t == v_options.valueType() || Value::null_t == v_options.valueType()) && Value::numeric_t == v_anon_uid.valueType() && Value::numeric_t == v_anon_gid.valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_string_list *root_list = value_to_string_list(v_root_list); lsm_string_list *rw_list = value_to_string_list(v_rw_list); lsm_string_list *ro_list = value_to_string_list(v_ro_list); if (root_list && rw_list && ro_list) { const char *fs_id = v_fs_id.asC_str(); const char *export_path = v_export_path.asC_str(); const char *auth_type = v_auth_type.asC_str(); const char *options = v_options.asC_str(); lsm_nfs_export *exported = NULL; int64_t anon_uid = get_uid_gid(v_anon_uid); int64_t anon_gid = get_uid_gid(v_anon_gid); rc = p->nas_ops->nfs_export(p, fs_id, export_path, root_list, rw_list, ro_list, anon_uid, anon_gid, auth_type, options, &exported, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { response = nfs_export_to_value(exported); lsm_nfs_export_record_free(exported); } } else { rc = LSM_ERR_NO_MEMORY; } lsm_string_list_free(root_list); lsm_string_list_free(rw_list); lsm_string_list_free(ro_list); } else { 
rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int export_remove(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->nas_ops && p->nas_ops->nfs_export_remove) { Value v_export = params["export"]; if (IS_CLASS_FS_EXPORT(v_export) && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_nfs_export *exp = value_to_nfs_export(v_export); if (exp) { rc = p->nas_ops->nfs_export_remove(p, exp, LSM_FLAG_GET_VALUE(params)); lsm_nfs_export_record_free(exp); exp = NULL; } else { rc = LSM_ERR_NO_MEMORY; } } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int iscsi_chap(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->san_ops && p->san_ops->iscsi_chap_auth) { Value v_init = params["init_id"]; Value v_in_user = params["in_user"]; Value v_in_password = params["in_password"]; Value v_out_user = params["out_user"]; Value v_out_password = params["out_password"]; if (Value::string_t == v_init.valueType() && (Value::string_t == v_in_user.valueType() || Value::null_t == v_in_user.valueType()) && (Value::string_t == v_in_password.valueType() || Value::null_t == v_in_password.valueType()) && (Value::string_t == v_out_user.valueType() || Value::null_t == v_out_user.valueType()) && (Value::string_t == v_out_password.valueType() || Value::null_t == v_out_password.valueType()) && LSM_FLAG_EXPECTED_TYPE(params)) { rc = p->san_ops->iscsi_chap_auth(p, v_init.asC_str(), v_in_user.asC_str(), v_in_password.asC_str(), v_out_user.asC_str(), v_out_password.asC_str(), LSM_FLAG_GET_VALUE(params)); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int handle_volume_raid_create_cap_get(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->ops_v1_2 && p->ops_v1_2->vol_create_raid_cap_get) { Value v_system = params["system"]; if (IS_CLASS_SYSTEM(v_system) && LSM_FLAG_EXPECTED_TYPE(params)) { uint32_t *supported_raid_types = NULL; uint32_t 
supported_raid_type_count = 0; uint32_t *supported_strip_sizes = NULL; uint32_t supported_strip_size_count = 0; lsm_system *sys = value_to_system(v_system); if (sys) { rc = p->ops_v1_2->vol_create_raid_cap_get (p, sys, &supported_raid_types, &supported_raid_type_count, &supported_strip_sizes, &supported_strip_size_count, LSM_FLAG_GET_VALUE (params)); if (LSM_ERR_OK == rc) { std::vector < Value > result; result.push_back(uint32_array_to_value (supported_raid_types, supported_raid_type_count)); result.push_back(uint32_array_to_value (supported_strip_sizes, supported_strip_size_count)); response = Value(result); free(supported_raid_types); free(supported_strip_sizes); } } else { rc = LSM_ERR_NO_MEMORY; } lsm_system_record_free(sys); } else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } static int handle_volume_raid_create(lsm_plugin_ptr p, Value & params, Value & response) { int rc = LSM_ERR_NO_SUPPORT; if (p && p->ops_v1_2 && p->ops_v1_2->vol_create_raid) { Value v_name = params["name"]; Value v_raid_type = params["raid_type"]; Value v_strip_size = params["strip_size"]; Value v_disks = params["disks"]; if (Value::string_t == v_name.valueType() && Value::numeric_t == v_raid_type.valueType() && Value::numeric_t == v_strip_size.valueType() && Value::array_t == v_disks.valueType() && LSM_FLAG_EXPECTED_TYPE(params)) { lsm_disk **disks = NULL; uint32_t disk_count = 0; rc = value_array_to_disks(v_disks, &disks, &disk_count); if (LSM_ERR_OK != rc) { lsm_disk_record_array_free(disks, disk_count); return rc; } const char *name = v_name.asC_str(); lsm_volume_raid_type raid_type = (lsm_volume_raid_type) v_raid_type.asInt32_t(); uint32_t strip_size = v_strip_size.asUint32_t(); lsm_volume *new_vol = NULL; rc = p->ops_v1_2->vol_create_raid(p, name, raid_type, disks, disk_count, strip_size, &new_vol, LSM_FLAG_GET_VALUE(params)); if (LSM_ERR_OK == rc) { response = volume_to_value(new_vol); lsm_volume_record_free(new_vol); } lsm_disk_record_array_free(disks, disk_count); } 
else { rc = LSM_ERR_TRANSPORT_INVALID_ARG; } } return rc; } /** * map of function pointers */ static std::map < std::string, handler > dispatch = static_map < std::string, handler > ("access_group_initiator_add", ag_initiator_add) ("access_group_create", ag_create) ("access_group_delete", ag_delete) ("access_group_initiator_delete", ag_initiator_del) ("volume_mask", volume_mask) ("access_groups", ag_list) ("volume_unmask", volume_unmask) ("access_groups_granted_to_volume", ag_granted_to_volume) ("capabilities", capabilities) ("disks", handle_disks) ("export_auth", export_auth) ("export_fs", export_fs) ("export_remove", export_remove) ("exports", exports) ("fs_file_clone", fs_file_clone) ("fs_child_dependency", fs_child_dependency) ("fs_child_dependency_rm", fs_child_dependency_rm) ("fs_clone", fs_clone) ("fs_create", fs_create) ("fs_delete", fs_delete) ("fs", fs) ("fs_resize", fs_resize) ("fs_snapshot_create", ss_create) ("fs_snapshot_delete", ss_delete) ("fs_snapshot_restore", ss_restore) ("fs_snapshots", ss_list) ("time_out_get", handle_get_time_out) ("iscsi_chap_auth", iscsi_chap) ("job_free", handle_job_free) ("job_status", handle_job_status) ("plugin_info", handle_plugin_info) ("pools", handle_pools) ("target_ports", handle_target_ports) ("time_out_set", handle_set_time_out) ("plugin_unregister", handle_unregister) ("plugin_register", handle_register) ("systems", handle_system_list) ("volume_child_dependency_rm", volume_dependency_rm) ("volume_child_dependency", volume_dependency) ("volume_create", handle_volume_create) ("volume_delete", handle_volume_delete) ("volume_disable", handle_volume_disable) ("volume_enable", handle_volume_enable) ("volume_replicate", handle_volume_replicate) ("volume_replicate_range_block_size", handle_volume_replicate_range_block_size) ("volume_replicate_range", handle_volume_replicate_range) ("volume_resize", handle_volume_resize) ("volumes_accessible_by_access_group", vol_accessible_by_ag) ("volumes", handle_volumes) 
("volume_raid_info", handle_volume_raid_info) ("pool_member_info", handle_pool_member_info) ("volume_raid_create", handle_volume_raid_create) ("volume_raid_create_cap_get", handle_volume_raid_create_cap_get); static int process_request(lsm_plugin_ptr p, const std::string & method, Value & request, Value & response) { int rc = LSM_ERR_LIB_BUG; response = Value(); //Default response will be null if (dispatch.find(method) != dispatch.end()) { rc = (dispatch[method]) (p, request["params"], response); } else { rc = LSM_ERR_NO_SUPPORT; } return rc; } static int lsm_plugin_run(lsm_plugin_ptr p) { int rc = 0; lsm_flag flags = 0; if (LSM_IS_PLUGIN(p)) { while (true) { try { if (!LSM_IS_PLUGIN(p)) { syslog(LOG_USER | LOG_NOTICE, "Someone stepped on " "plugin pointer, exiting!"); break; } Value req = p->tp->readRequest(); Value resp; if (req.isValidRequest()) { std::string method = req["method"].asString(); rc = process_request(p, method, req, resp); if (LSM_ERR_OK == rc || LSM_ERR_JOB_STARTED == rc) { p->tp->responseSend(resp); } else { error_send(p, rc); } if (method == "plugin_unregister") { flags = LSM_FLAG_GET_VALUE(req["params"]); break; } } else { syslog(LOG_USER | LOG_NOTICE, "Invalid request"); break; } } catch(EOFException & eof) { break; } catch(ValueException & ve) { syslog(LOG_USER | LOG_NOTICE, "Plug-in exception: %s", ve.what()); rc = 1; break; } catch(LsmException & le) { syslog(LOG_USER | LOG_NOTICE, "Plug-in exception: %s", le.what()); rc = 2; break; } catch( ...) 
{ syslog(LOG_USER | LOG_NOTICE, "Plug-in un-handled exception"); rc = 3; break; } } lsm_plugin_free(p, flags); p = NULL; } else { rc = LSM_ERR_INVALID_ARGUMENT; } return rc; } int lsm_log_error_basic(lsm_plugin_ptr plug, lsm_error_number code, const char *msg) { if (!LSM_IS_PLUGIN(plug)) { return LSM_ERR_INVALID_ARGUMENT; } lsm_error_ptr e = LSM_ERROR_CREATE_PLUGIN_MSG(code, msg); if (e) { int rc = lsm_plugin_error_log(plug, e); if (LSM_ERR_OK != rc) { syslog(LOG_USER | LOG_NOTICE, "Plug-in error %d while reporting an error, code= %d, " "msg= %s", rc, code, msg); } } return (int) code; } int lsm_plugin_error_log(lsm_plugin_ptr plug, lsm_error_ptr error) { if (!LSM_IS_PLUGIN(plug) || !LSM_IS_ERROR(error)) { return LSM_ERR_INVALID_ARGUMENT; } if (plug->error) { lsm_error_free(plug->error); } plug->error = error; return LSM_ERR_OK; } #define STR_D(c, s) \ do { \ if(s) { \ (c) = strdup(s); \ if( !c ) {\ rc = LSM_ERR_NO_MEMORY; \ goto bail; \ } \ } \ } while(0)\ int LSM_DLL_EXPORT lsm_uri_parse(const char *uri, char **scheme, char **user, char **server, int *port, char **path, lsm_hash ** query_params) { int rc = LSM_ERR_INVALID_ARGUMENT; xmlURIPtr u = NULL; if (uri && strlen(uri) > 0) { *scheme = NULL; *user = NULL; *server = NULL; *port = -1; *path = NULL; *query_params = NULL; u = xmlParseURI(uri); if (u) { STR_D(*scheme, u->scheme); STR_D(*user, u->user); STR_D(*server, u->server); STR_D(*path, u->path); *port = u->port; *query_params = lsm_hash_alloc(); if (*query_params) { int i; struct qparam_set *qp = NULL; qp = qparam_query_parse(u->query_raw); if (qp) { for (i = 0; i < qp->n; ++i) { rc = lsm_hash_string_set(*query_params, qp->p[i].name, qp->p[i].value); if (LSM_ERR_OK != rc) { free_qparam_set(qp); goto bail; } } free_qparam_set(qp); } } else { rc = LSM_ERR_NO_MEMORY; goto bail; } rc = LSM_ERR_OK; } bail: if (rc != LSM_ERR_OK) { free(*scheme); *scheme = NULL; free(*user); *user = NULL; free(*server); *server = NULL; *port = -1; free(*path); *path = NULL; 
lsm_hash_free(*query_params); *query_params = NULL; } if (u) { xmlFreeURI(u); u = NULL; } } return rc; } typedef int (*array_cmp) (void *item, void *cmp_data); typedef void (*free_item) (void *item); #define CMP_FUNCTION(name, method, method_type) \ static int name(void *i, void *d) \ { \ method_type *v = (method_type *)i; \ char *val = (char *)d; \ \ if( strcmp(method(v), val) == 0 ) { \ return 1; \ } \ return 0; \ } \ #define CMP_FREE_FUNCTION(name, method, method_type) \ static void name(void *i) \ { \ method((method_type *)i); \ } \ static int filter(void *a[], size_t size, array_cmp cmp, void *cmp_data, free_item fo) { int remaining = 0; size_t i = 0; for (i = 0; i < size; ++i) { if (cmp(a[i], cmp_data)) { memmove(&a[remaining], &a[i], sizeof(void *)); remaining += 1; } else { fo(a[i]); a[i] = NULL; } } return remaining; } CMP_FUNCTION(volume_compare_id, lsm_volume_id_get, lsm_volume) CMP_FUNCTION(volume_compare_system, lsm_volume_system_id_get, lsm_volume) CMP_FUNCTION(volume_compare_pool, lsm_volume_pool_id_get, lsm_volume) CMP_FREE_FUNCTION(volume_free, lsm_volume_record_free, lsm_volume) void lsm_plug_volume_search_filter(const char *search_key, const char *search_value, lsm_volume * vols[], uint32_t * count) { array_cmp cmp = NULL; if (search_key) { if (0 == strcmp("id", search_key)) { cmp = volume_compare_id; } else if (0 == strcmp("system_id", search_key)) { cmp = volume_compare_system; } else if (0 == strcmp("pool_id", search_key)) { cmp = volume_compare_pool; } if (cmp) { *count = filter((void **) vols, *count, cmp, (void *) search_value, volume_free); } } } CMP_FUNCTION(pool_compare_id, lsm_pool_id_get, lsm_pool) CMP_FUNCTION(pool_compare_system, lsm_pool_system_id_get, lsm_pool) CMP_FREE_FUNCTION(pool_free, lsm_pool_record_free, lsm_pool); void lsm_plug_pool_search_filter(const char *search_key, const char *search_value, lsm_pool * pools[], uint32_t * count) { array_cmp cmp = NULL; if (search_key) { if (0 == strcmp("id", search_key)) { cmp = 
pool_compare_id; } else if (0 == strcmp("system_id", search_key)) { cmp = pool_compare_system; } if (cmp) { *count = filter((void **) pools, *count, cmp, (void *) search_value, pool_free); } } } CMP_FUNCTION(disk_compare_id, lsm_disk_id_get, lsm_disk) CMP_FUNCTION(disk_compare_system, lsm_disk_system_id_get, lsm_disk) CMP_FREE_FUNCTION(disk_free, lsm_disk_record_free, lsm_disk) void lsm_plug_disk_search_filter(const char *search_key, const char *search_value, lsm_disk * disks[], uint32_t * count) { array_cmp cmp = NULL; if (search_key) { if (0 == strcmp("id", search_key)) { cmp = disk_compare_id; } else if (0 == strcmp("system_id", search_key)) { cmp = disk_compare_system; } if (cmp) { *count = filter((void **) disks, *count, cmp, (void *) search_value, disk_free); } } } CMP_FUNCTION(access_group_compare_id, lsm_access_group_id_get, lsm_access_group) CMP_FUNCTION(access_group_compare_system, lsm_access_group_system_id_get, lsm_access_group) CMP_FREE_FUNCTION(access_group_free, lsm_access_group_record_free, lsm_access_group); void lsm_plug_access_group_search_filter(const char *search_key, const char *search_value, lsm_access_group * ag[], uint32_t * count) { array_cmp cmp = NULL; if (search_key) { if (0 == strcmp("id", search_key)) { cmp = access_group_compare_id; } else if (0 == strcmp("system_id", search_key)) { cmp = access_group_compare_system; } if (cmp) { *count = filter((void **) ag, *count, cmp, (void *) search_value, access_group_free); } } } CMP_FUNCTION(fs_compare_id, lsm_fs_id_get, lsm_fs) CMP_FUNCTION(fs_compare_system, lsm_fs_system_id_get, lsm_fs) CMP_FREE_FUNCTION(fs_free, lsm_fs_record_free, lsm_fs); void lsm_plug_fs_search_filter(const char *search_key, const char *search_value, lsm_fs * fs[], uint32_t * count) { array_cmp cmp = NULL; if (search_key) { if (0 == strcmp("id", search_key)) { cmp = fs_compare_id; } else if (0 == strcmp("system_id", search_key)) { cmp = fs_compare_system; } if (cmp) { *count = filter((void **) fs, *count, cmp, (void *) 
search_value, fs_free); } } } CMP_FUNCTION(nfs_compare_id, lsm_nfs_export_id_get, lsm_nfs_export) CMP_FUNCTION(nfs_compare_fs_id, lsm_nfs_export_fs_id_get, lsm_nfs_export) CMP_FREE_FUNCTION(nfs_free, lsm_nfs_export_record_free, lsm_nfs_export) void lsm_plug_nfs_export_search_filter(const char *search_key, const char *search_value, lsm_nfs_export * exports[], uint32_t * count) { array_cmp cmp = NULL; if (search_key) { if (0 == strcmp("id", search_key)) { cmp = nfs_compare_id; } else if (0 == strcmp("fs_id", search_key)) { cmp = nfs_compare_fs_id; } if (cmp) { *count = filter((void **) exports, *count, cmp, (void *) search_value, nfs_free); } } } CMP_FUNCTION(tp_compare_id, lsm_target_port_id_get, lsm_target_port) CMP_FUNCTION(tp_compare_system_id, lsm_target_port_system_id_get, lsm_target_port) CMP_FREE_FUNCTION(tp_free, lsm_target_port_record_free, lsm_target_port) void lsm_plug_target_port_search_filter(const char *search_key, const char *search_value, lsm_target_port * tp[], uint32_t * count) { array_cmp cmp = NULL; if (search_key) { if (0 == strcmp("id", search_key)) { cmp = tp_compare_id; } else if (0 == strcmp("system_id", search_key)) { cmp = tp_compare_system_id; } if (cmp) { *count = filter((void **) tp, *count, cmp, (void *) search_value, tp_free); } } } libstoragemgmt-1.2.3/c_binding/Makefile.in0000664000175000017500000007177612542455445015475 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = c_binding DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(top_srcdir)/build-aux/depcomp ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(libdir)" LTLIBRARIES = $(lib_LTLIBRARIES) am__DEPENDENCIES_1 = libstoragemgmt_la_DEPENDENCIES = $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) am__dirstamp = $(am__leading_dot)dirstamp am_libstoragemgmt_la_OBJECTS = lsm_mgmt.lo lsm_datatypes.lo \ lsm_convert.lo lsm_ipc.lo lsm_plugin_ipc.lo util/qparams.lo libstoragemgmt_la_OBJECTS = $(am_libstoragemgmt_la_OBJECTS) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = libstoragemgmt_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CXXLD) \ $(AM_CXXFLAGS) $(CXXFLAGS) $(libstoragemgmt_la_LDFLAGS) \ $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/build-aux/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = CXXCOMPILE = 
$(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CXXFLAGS) $(CXXFLAGS) AM_V_CXX = $(am__v_CXX_@AM_V@) am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) am__v_CXX_0 = @echo " CXX " $@; am__v_CXX_1 = CXXLD = $(CXX) CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) am__v_CXXLD_0 = @echo " CXXLD " $@; am__v_CXXLD_1 = SOURCES = $(libstoragemgmt_la_SOURCES) DIST_SOURCES = $(libstoragemgmt_la_SOURCES) RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. 
am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ 
INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ 
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ SUBDIRS = include AM_CPPFLAGS = -I$(top_srcdir)/c_binding/include \ -I$(top_builddir)/c_binding/include \ -I@srcdir@/c_binding/include \ $(LIBXML_CFLAGS) $(LIBGLIB_CFLAGS) lib_LTLIBRARIES = libstoragemgmt.la libstoragemgmt_la_LIBADD = $(LIBXML_LIBS) $(YAJL_LIBS) $(LIBGLIB_LIBS) libstoragemgmt_la_LDFLAGS = -version-info $(LIBSM_LIBTOOL_VERSION) libstoragemgmt_la_SOURCES = \ lsm_mgmt.cpp lsm_datatypes.hpp lsm_datatypes.cpp lsm_convert.hpp \ lsm_convert.cpp lsm_ipc.hpp lsm_ipc.cpp lsm_plugin_ipc.hpp \ lsm_plugin_ipc.cpp util/qparams.c util/qparams.h all: all-recursive .SUFFIXES: .SUFFIXES: .c .cpp .lo .o .obj $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ 
$(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu c_binding/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu c_binding/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-libLTLIBRARIES: $(lib_LTLIBRARIES) @$(NORMAL_INSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ list2=; for p in $$list; do \ if test -f $$p; then \ list2="$$list2 $$p"; \ else :; fi; \ done; \ test -z "$$list2" || { \ echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ } uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ for p in $$list; do \ $(am__strip_dir) \ echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f 
'$(DESTDIR)$(libdir)/$$f'"; \ $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ done clean-libLTLIBRARIES: -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; \ locs=`for p in $$list; do echo $$p; done | \ sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ sort -u`; \ test -z "$$locs" || { \ echo rm -f $${locs}; \ rm -f $${locs}; \ } util/$(am__dirstamp): @$(MKDIR_P) util @: > util/$(am__dirstamp) util/$(DEPDIR)/$(am__dirstamp): @$(MKDIR_P) util/$(DEPDIR) @: > util/$(DEPDIR)/$(am__dirstamp) util/qparams.lo: util/$(am__dirstamp) util/$(DEPDIR)/$(am__dirstamp) libstoragemgmt.la: $(libstoragemgmt_la_OBJECTS) $(libstoragemgmt_la_DEPENDENCIES) $(EXTRA_libstoragemgmt_la_DEPENDENCIES) $(AM_V_CXXLD)$(libstoragemgmt_la_LINK) -rpath $(libdir) $(libstoragemgmt_la_OBJECTS) $(libstoragemgmt_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) -rm -f util/*.$(OBJEXT) -rm -f util/*.lo distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsm_convert.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsm_datatypes.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsm_ipc.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsm_mgmt.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsm_plugin_ipc.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@util/$(DEPDIR)/qparams.Plo@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< .c.obj: @am__fastdepCC_TRUE@ 
$(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ @am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< .cpp.o: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< .cpp.obj: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ @am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ 
$(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .cpp.lo: @am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ @am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo @AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs -rm -rf util/.libs util/_libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. 
$(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(LTLIBRARIES) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(libdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ 
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) -rm -f util/$(DEPDIR)/$(am__dirstamp) -rm -f util/$(am__dirstamp) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-recursive clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ mostlyclean-am distclean: distclean-recursive -rm -rf ./$(DEPDIR) util/$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-libLTLIBRARIES install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -rf ./$(DEPDIR) util/$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-libLTLIBRARIES .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libLTLIBRARIES \ clean-libtool cscopelist-am ctags ctags-am distclean \ distclean-compile distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am 
install-html \ install-html-am install-info install-info-am \ install-libLTLIBRARIES install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am uninstall-libLTLIBRARIES # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/c_binding/lsm_ipc.cpp0000664000175000017500000005141112537737032015541 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . 
* * Author: tasleson */ #include "lsm_ipc.hpp" #include "libstoragemgmt/libstoragemgmt_plug_interface.h" #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #ifdef HAVE_YAJL_YAJL_VERSION_H #include #endif #if defined(HAVE_YAJL_YAJL_VERSION_H) && YAJL_MAJOR > 1 #define LSM_NEW_YAJL #endif static std::string zero_pad_num(unsigned int num) { std::ostringstream ss; ss << std::setw(Transport::HDR_LEN) << std::setfill('0') << num; return ss.str(); } Transport::Transport():s(-1) { } Transport::Transport(int socket_desc):s(socket_desc) { } int Transport::msg_send(const std::string & msg, int &error_code) { int rc = -1; error_code = 0; if (msg.size() > 0) { ssize_t written = 0; //fprintf(stderr, ">>> %s\n", msg.c_str()); std::string data = zero_pad_num(msg.size()) + msg; ssize_t msg_size = data.size(); while (written < msg_size) { int wrote = send(s, data.c_str() + written, (msg_size - written), MSG_NOSIGNAL); //Prevent SIGPIPE on write if (wrote != -1) { written += wrote; } else { error_code = errno; break; } } if ((written == msg_size) && error_code == 0) { rc = 0; } } return rc; } static std::string string_read(int fd, size_t count, int &error_code) { char buff[4096]; size_t amount_read = 0; std::string rc = ""; error_code = 0; while (amount_read < count) { ssize_t rd = recv(fd, buff, std::min(sizeof(buff), (count - amount_read)), MSG_WAITALL); if (rd > 0) { amount_read += rd; rc += std::string(buff, rd); } else { error_code = errno; break; } } if ((amount_read == count) && (error_code == 0)) return rc; else throw EOFException(""); } std::string Transport::msg_recv(int &error_code) { std::string msg; error_code = 0; unsigned long int payload_len = 0; std::string len = string_read(s, HDR_LEN, error_code); //Read the length if (len.size() && error_code == 0) { payload_len = strtoul(len.c_str(), NULL, 10); if (payload_len < 0x80000000) { /* Should be big enough */ msg = 
string_read(s, payload_len, error_code); } //fprintf(stderr, "<<< %s\n", msg.c_str()); } return msg; } int Transport::socket_get(const std::string & path, int &error_code) { int sfd = socket(AF_UNIX, SOCK_STREAM, 0); int rc = -1; error_code = 0; if (sfd != -1) { struct sockaddr_un addr; memset(&addr, 0, sizeof(addr)); addr.sun_family = AF_UNIX; strncpy(addr.sun_path, path.c_str(), sizeof(addr.sun_path) - 1); // Connect rc = connect(sfd, (struct sockaddr *) &addr, sizeof(addr)); if (rc != 0) { error_code = errno; rc = -1; //Redundant, connect should set to -1 on error ::close(sfd); } else { rc = sfd; //We are good to go. } } return rc; } Transport::~Transport() { close(); } int Transport::close() { int rc = EBADF; if (s >= 0) { int rc =::close(s); if (rc != 0) { rc = errno; } // Regardless, clear out socket s = -1; } return rc; } EOFException::EOFException(std::string m):std::runtime_error(m) { } ValueException::ValueException(std::string m):std::runtime_error(m) { } LsmException::LsmException(int code, std::string & msg): std::runtime_error(msg), error_code(code) { } LsmException::LsmException(int code, std::string & msg, const std:: string & debug_addl):std::runtime_error(msg), error_code(code), debug(debug_addl) { } LsmException::~LsmException()throw() { } LsmException::LsmException(int code, std::string & msg, const std::string & debug_addl, const std:: string & debug_data_addl):std::runtime_error(msg), error_code(code), debug(debug_addl), debug_data(debug_data_addl) { } Value::Value(void):t(null_t) { } Value::Value(bool v):t(boolean_t), s((v) ? 
"true" : "false") { } Value::Value(double v):t(numeric_t), s(to_string(v)) { } Value::Value(long double v):t(numeric_t), s(to_string(v)) { } Value::Value(uint32_t v):t(numeric_t), s(to_string(v)) { } Value::Value(int32_t v):t(numeric_t), s(to_string(v)) { } Value::Value(uint64_t v):t(numeric_t), s(to_string(v)) { } Value::Value(int64_t v):t(numeric_t), s(to_string(v)) { } Value::Value(value_type type, const std::string & v):t(type), s(v) { } Value::Value(const std::vector < Value > &v):t(array_t), array(v) { } Value::Value(const char *v) { if (v) { t = string_t; s = std::string(v); } else { t = null_t; } } Value::Value(const std::string & v):t(string_t), s(v) { } Value::Value(const std::map < std::string, Value > &v):t(object_t), obj(v) { } std::string Value::serialize(void) { const unsigned char *buf; std::string json; #ifdef LSM_NEW_YAJL size_t len; yajl_gen g = yajl_gen_alloc(NULL); if (g) { /* These could fail, but we will continue regardless */ yajl_gen_config(g, yajl_gen_beautify, 1); yajl_gen_config(g, yajl_gen_indent_string, " "); } #else unsigned int len; yajl_gen_config conf = { 1, " " }; yajl_gen g = yajl_gen_alloc(&conf, NULL); #endif if (g) { marshal(g); if (yajl_gen_status_ok == yajl_gen_get_buf(g, &buf, &len)) { json = std::string((const char *) buf); } yajl_gen_free(g); } return json; } Value::value_type Value::valueType() const { return t; } Value & Value::operator[](const std::string & key) { if (t == object_t) { return obj[key]; } throw ValueException("Value not object"); } Value & Value::operator[](uint32_t i) { if (t == array_t) { return array[i]; } throw ValueException("Value not array"); } bool Value::hasKey(const std::string & k) { if (t == object_t) { std::map < std::string, Value >::iterator iter = obj.find(k); if (iter != obj.end() && iter->first == k) { return true; } } return false; } bool Value::isValidRequest() { return (t == Value::object_t && hasKey("method") && hasKey("id") && hasKey("params")); } Value Value::getValue(const char 
*key) { if (hasKey(key)) { return obj[key]; } return Value(); } const char *Value::asNumString() { const char *rc = NULL; if (t == numeric_t) { rc = s.c_str(); } return rc; } void *Value::asVoid() { if (t == null_t) { return NULL; } throw ValueException("Value not null"); } bool Value::asBool() { if (t == boolean_t) { return (s == "true"); } throw ValueException("Value not boolean"); } double Value::asDouble() { if (t == numeric_t) { double rc; if (sscanf(s.c_str(), "%lf", &rc) > 0) { return rc; } throw ValueException("Value not a double"); } throw ValueException("Value not numeric"); } long double Value::asLongDouble() { if (t == numeric_t) { long double rc; if (sscanf(s.c_str(), "%Lf", &rc) > 0) { return rc; } throw ValueException("Value not a long double"); } throw ValueException("Value not numeric"); } int32_t Value::asInt32_t() { if (t == numeric_t) { int32_t rc; if (sscanf(s.c_str(), "%d", &rc) > 0) { return rc; } throw ValueException("Value not int32"); } throw ValueException("Value not numeric"); } int64_t Value::asInt64_t() { if (t == numeric_t) { int64_t rc; if (sscanf(s.c_str(), "%lld", (long long int *) &rc) > 0) { return rc; } throw ValueException("Not an integer"); } throw ValueException("Value not numeric"); } uint32_t Value::asUint32_t() { if (t == numeric_t) { uint32_t rc; if (sscanf(s.c_str(), "%u", &rc) > 0) { return rc; } throw ValueException("Not an integer"); } throw ValueException("Value not numeric"); } uint64_t Value::asUint64_t() { if (t == numeric_t) { uint64_t rc; if (sscanf(s.c_str(), "%llu", (long long unsigned int *) &rc) > 0) { return rc; } throw ValueException("Not an integer"); } throw ValueException("Value not numeric"); } std::string Value::asString() { if (t == string_t) { return s; } else if (t == null_t) { return std::string(); } throw ValueException("Value not string"); } const char *Value::asC_str() { if (t == string_t) { return s.c_str(); } else if (t == null_t) { return NULL; } throw ValueException("Value not string"); } 
std::map < std::string, Value > Value::asObject() { if (t == object_t) { return obj; } throw ValueException("Value not object"); } std::vector < Value > Value::asArray() { if (t == array_t) { return array; } throw ValueException("Value not array"); } void Value::marshal(yajl_gen g) { switch (t) { case (null_t): { if (yajl_gen_status_ok != yajl_gen_null(g)) { throw ValueException("yajl_gen_null failure"); } break; } case (boolean_t): { if (yajl_gen_status_ok != yajl_gen_bool(g, (s == "true") ? 1 : 0)) { throw ValueException("yajl_gen_bool failure"); } break; } case (string_t): { if (yajl_gen_status_ok != yajl_gen_string(g, (const unsigned char *) s.c_str(), s.size())) { throw ValueException("yajl_gen_string failure"); } break; } case (numeric_t): { if (yajl_gen_status_ok != yajl_gen_number(g, s.c_str(), s.size())) { throw ValueException("yajl_gen_number failure"); } break; } case (object_t): { if (yajl_gen_status_ok != yajl_gen_map_open(g)) { throw ValueException("yajl_gen_map_open failure"); } std::map < std::string, Value >::iterator iter; for (iter = obj.begin(); iter != obj.end(); iter++) { if (yajl_gen_status_ok != yajl_gen_string(g, (const unsigned char *) iter->first. 
c_str(), iter->first.size())) { throw ValueException("yajl_gen_string failure"); } iter->second.marshal(g); } if (yajl_gen_status_ok != yajl_gen_map_close(g)) { throw ValueException("yajl_gen_map_close failure"); } break; } case (array_t): { if (yajl_gen_status_ok != yajl_gen_array_open(g)) { throw ValueException("yajl_gen_array_open failure"); } for (unsigned int i = 0; i < array.size(); ++i) { array[i].marshal(g); } if (yajl_gen_status_ok != yajl_gen_array_close(g)) { throw ValueException("yajl_gen_array_close failure"); } break; } } } class LSM_DLL_LOCAL ParseElement { public: enum parse_type { null, boolean, string, number, begin_map, end_map, begin_array, end_array, map_key, unknown }; ParseElement():t(unknown) { } ParseElement(parse_type type):t(type) { } ParseElement(parse_type type, std::string value):t(type), v(value) { } parse_type t; std::string v; std::string to_string() { return "type " +::to_string(t) + ": value" + v; } }; #ifdef LSM_NEW_YAJL #define YAJL_SIZE_T size_t #else #define YAJL_SIZE_T unsigned int #endif static int handle_value(void *ctx, ParseElement::parse_type type) { std::list < ParseElement > *l = (std::list < ParseElement > *)ctx; l->push_back(ParseElement(type)); return 1; } static int handle_value(void *ctx, ParseElement::parse_type type, const char *s, size_t len) { std::list < ParseElement > *l = (std::list < ParseElement > *)ctx; l->push_back(ParseElement(type, std::string(s, len))); return 1; } static int handle_null(void *ctx) { return handle_value(ctx, ParseElement::null); } static int handle_boolean(void *ctx, int boolean) { std::string b = (boolean) ? 
"true" : "false"; return handle_value(ctx, ParseElement::boolean, b.c_str(), b.size()); } static int handle_number(void *ctx, const char *s, YAJL_SIZE_T len) { return handle_value(ctx, ParseElement::number, s, len); } static int handle_string(void *ctx, const unsigned char *stringVal, YAJL_SIZE_T len) { return handle_value(ctx, ParseElement::string, (const char *) stringVal, len); } static int handle_map_key(void *ctx, const unsigned char *stringVal, YAJL_SIZE_T len) { return handle_value(ctx, ParseElement::map_key, (const char *) stringVal, len); } static int handle_start_map(void *ctx) { return handle_value(ctx, ParseElement::begin_map); } static int handle_end_map(void *ctx) { return handle_value(ctx, ParseElement::end_map); } static int handle_start_array(void *ctx) { return handle_value(ctx, ParseElement::begin_array); } static int handle_end_array(void *ctx) { return handle_value(ctx, ParseElement::end_array); } static yajl_callbacks callbacks = { handle_null, handle_boolean, NULL, NULL, handle_number, handle_string, handle_start_map, handle_map_key, handle_end_map, handle_start_array, handle_end_array }; static ParseElement get_next(std::list < ParseElement > &l) { ParseElement rc = l.front(); l.pop_front(); return rc; } static Value ParseElements(std::list < ParseElement > &l); static Value HandleArray(std::list < ParseElement > &l) { std::vector < Value > values; ParseElement cur; if (!l.empty()) { do { cur = l.front(); if (cur.t != ParseElement::end_array) { values.push_back(ParseElements(l)); } else { get_next(l); } } while (!l.empty() && cur.t != ParseElement::end_array); } return Value(values); } static Value HandleObject(std::list < ParseElement > &l) { std::map < std::string, Value > values; ParseElement cur; if (!l.empty()) { do { cur = get_next(l); if (cur.t == ParseElement::map_key) { values[cur.v] = ParseElements(l); } else if (cur.t != ParseElement::end_map) { throw ValueException("Unexpected state: " + cur.to_string()); } } while (!l.empty() && 
cur.t != ParseElement::end_map); } return Value(values); } static Value ParseElements(std::list < ParseElement > &l) { if (!l.empty()) { ParseElement cur = get_next(l); switch (cur.t) { case (ParseElement::null): case (ParseElement::boolean): case (ParseElement::string): case (ParseElement::number): { return Value((Value::value_type) cur.t, cur.v); break; } case (ParseElement::begin_map): { return HandleObject(l); break; } case (ParseElement::end_map): { throw ValueException("Unexpected end_map"); break; } case (ParseElement::begin_array): { return HandleArray(l); break; } case (ParseElement::end_array): { throw ValueException("Unexpected end_array"); break; } case (ParseElement::map_key): { throw ValueException("Unexpected map_key"); break; } case (ParseElement::unknown): { throw ValueException("Unexpected unknown"); break; } } } return Value(); } std::string Payload::serialize(Value & v) { return v.serialize(); } Value Payload::deserialize(const std::string & json) { yajl_handle hand; yajl_status stat; std::list < ParseElement > l; #ifdef LSM_NEW_YAJL hand = yajl_alloc(&callbacks, NULL, (void *) &l); yajl_config(hand, yajl_allow_comments, 1); #else yajl_parser_config cfg = { 1, 1 }; hand = yajl_alloc(&callbacks, &cfg, NULL, (void *) &l); #endif if (hand) { stat = yajl_parse(hand, (const unsigned char *) json.c_str(), json.size()); yajl_free(hand); if (stat == yajl_status_ok) { return ParseElements(l); } else { throw ValueException("In-valid json"); } } return Value(); } Ipc::Ipc() { } Ipc::Ipc(int fd):t(fd) { } Ipc::Ipc(std::string socket_path) { int e = 0; int fd = Transport::socket_get(socket_path, e); if (fd >= 0) { t = Transport(fd); } } Ipc::~Ipc() { t.close(); } void Ipc::requestSend(const std::string request, const Value & params, int32_t id) { int rc = 0; int ec = 0; std::map < std::string, Value > v; v["method"] = Value(request); v["id"] = Value(id); v["params"] = params; Value req(v); rc = t.msg_send(Payload::serialize(req), ec); if (rc != 0) { 
std::string em = std::string("Error sending message: errno ") +::to_string(ec); throw LsmException((int) LSM_ERR_TRANSPORT_COMMUNICATION, em); } } void Ipc::errorSend(int error_code, std::string msg, std::string debug, uint32_t id) { int ec = 0; int rc = 0; std::map < std::string, Value > v; std::map < std::string, Value > error_data; error_data["code"] = Value(error_code); error_data["message"] = Value(msg); error_data["data"] = Value(debug); v["error"] = Value(error_data); v["id"] = Value(id); Value e(v); rc = t.msg_send(Payload::serialize(e), ec); if (rc != 0) { std::string em = std::string("Error sending error message: errno ") +::to_string(ec); throw LsmException((int) LSM_ERR_TRANSPORT_COMMUNICATION, em); } } Value Ipc::readRequest(void) { int ec; std::string resp = t.msg_recv(ec); return Payload::deserialize(resp); } void Ipc::responseSend(const Value & response, uint32_t id) { int rc; int ec; std::map < std::string, Value > v; v["id"] = id; v["result"] = response; Value resp(v); rc = t.msg_send(Payload::serialize(resp), ec); if (rc != 0) { std::string em = std::string("Error sending response: errno ") +::to_string(ec); throw LsmException((int) LSM_ERR_TRANSPORT_COMMUNICATION, em); } } Value Ipc::responseRead() { Value r = readRequest(); if (r.hasKey(std::string("result"))) { return r.getValue("result"); } else { std::map < std::string, Value > rp = r.asObject(); std::map < std::string, Value > error = rp["error"].asObject(); std::string msg = error["message"].asString(); std::string data = error["data"].asString(); throw LsmException((int) (error["code"].asInt32_t()), msg, data); } } Value Ipc::rpc(const std::string & request, const Value & params, int32_t id) { requestSend(request, params, id); return responseRead(); } libstoragemgmt-1.2.3/c_binding/lsm_plugin_ipc.hpp0000664000175000017500000000231712537737032017125 00000000000000/* * Copyright (C) 2011-2013 Red Hat, Inc. 
* This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #ifndef LSM_PLUGIN_IPC_HPP #define LSM_PLUGIN_IPC_HPP #include #include "libstoragemgmt/libstoragemgmt_common.h" template < typename K, typename V > class LSM_DLL_LOCAL static_map { private: std::map < K, V > _m; public: static_map(const K & key, const V & val) { _m[key] = val; } static_map < K, V > &operator()(const K & key, const V & val) { _m[key] = val; return *this; } operator std::map < K, V > () { return _m; } }; #endif libstoragemgmt-1.2.3/python_binding/0000775000175000017500000000000012542455463014565 500000000000000libstoragemgmt-1.2.3/python_binding/lsm/0000775000175000017500000000000012542455463015360 500000000000000libstoragemgmt-1.2.3/python_binding/lsm/_common.py0000664000175000017500000004367212537737032017314 00000000000000# Copyright (C) 2011-2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: tasleson import hashlib import os import unittest import urlparse import re import sys import syslog import collections import inspect import urllib2 import functools import traceback def default_property(name, allow_set=True, doc=None): """ Creates the get/set properties for the given name. It assumes that the actual attribute is '_' + name TODO: Expand this with domain validation to ensure the values are correct. """ attribute_name = '_' + name def getter(self): return getattr(self, attribute_name) def setter(self, value): setattr(self, attribute_name, value) prop = property(getter, setter if allow_set else None, None, doc) def decorator(cls): setattr(cls, name, prop) return cls return decorator def common_urllib2_error_handler(exp): if isinstance(exp, urllib2.HTTPError): raise LsmError(ErrorNumber.PLUGIN_AUTH_FAILED, str(exp)) if isinstance(exp, urllib2.URLError): desc = str(exp) if 'urlopen error' in desc: if 'Errno 111' in desc: raise LsmError(ErrorNumber.NETWORK_CONNREFUSED, 'Connection refused') if 'Errno 113' in desc: raise LsmError(ErrorNumber.NETWORK_HOSTDOWN, 'Host is down') error("Unexpected network error:\n" + traceback.format_exc()) raise LsmError(ErrorNumber.NETWORK_ERROR, desc) stack_trace = traceback.format_exc() error("Unexpected exception:\n" + stack_trace) raise LsmError(ErrorNumber.PLUGIN_BUG, "Unexpected exception", stack_trace) ## Documentation for Proxy class. # # Class to encapsulate the actual class we want to call. When an attempt is # made to access an attribute that doesn't exist we will raise an LsmError # instead of the default keyError. class Proxy(object): """ Used to provide an unambiguous error when a feature is not implemented. """ ## The constructor. # @param self The object self # @param obj The object instance to wrap def __init__(self, obj=None): """ Constructor which takes an object to wrap. 
""" self.proxied_obj = obj ## Called each time an attribute is requested of the object # @param self The object self # @param name Name of the attribute being accessed # @return The result of the method def __getattr__(self, name): """ Called each time an attribute is requested of the object """ if hasattr(self.proxied_obj, name): return functools.partial(self._present, name) else: raise LsmError(ErrorNumber.NO_SUPPORT, "Unsupported operation") ## Method which is called to invoke the actual method of interest. # @param self The object self # @param _proxy_method_name Method to invoke # @param args Arguments # @param kwargs Keyword arguments # @return The result of the method invocation def _present(self, _proxy_method_name, *args, **kwargs): """ Method which is called to invoke the actual method of interest. """ return getattr(self.proxied_obj, _proxy_method_name)(*args, **kwargs) # variable in client and specified on the command line for the daemon UDS_PATH = '/var/run/lsm/ipc' #Set to True for verbose logging LOG_VERBOSE = True ##Constant for byte size SIZE_CONS = { 'B': 1, 'KiB': 2 ** 10, 'KB': 10 ** 3, 'K': 2 ** 10, 'k': 2 ** 10, 'MiB': 2 ** 20, 'MB': 10 ** 6, 'M': 2 ** 20, 'm': 2 ** 20, 'GiB': 2 ** 30, 'GB': 10 ** 9, 'G': 2 ** 30, 'g': 2 ** 30, 'TiB': 2 ** 40, 'TB': 10 ** 12, 'T': 2 ** 40, 't': 2 ** 40, 'PiB': 2 ** 50, 'PB': 10 ** 15, 'P': 2 ** 50, 'p': 2 ** 50, 'EiB': 2 ** 60, 'EB': 10 ** 18, 'E': 2 ** 60, 'e': 2 ** 60, } SIZE_CONS_CHK_LST = ['EiB', 'PiB', 'TiB', 'GiB', 'MiB', 'KiB'] ##Converts the size into human format. # @param size Size in bytes # @param human True|False # @return Human representation of size def sh(size, human=False): """ Convert size in bytes to human readable size The return string will follow IEC binary prefixes, e.g. '1.9 KiB' For size less than 1024, we do nothing but return the int we get. TODO: Need a expect to handle when size is not a int. int() might do. 
""" units = None if human: for key_name in SIZE_CONS_CHK_LST: if size >= SIZE_CONS[key_name]: size /= float(SIZE_CONS[key_name]) units = key_name break if not units: units = "B" return "%.2f %s" % (size, units) else: return size ##Converts the size into human format. # @param size Size in bytes # @return Human representation of size in IEC binary size prefixes. def size_bytes_2_size_human(size): """ Convert integer size in bytes to human readable size. We are following rules of IEC binary prefixes on size: http://en.wikipedia.org/wiki/Gibibyte The biggest of unit this function supported is PiB. The precision is 2 which means you will get '1.99 KiB' """ return sh(size, True) ##Converts the size into human format. # @param size_human Human readable size string, e.g. '1.9 KiB' # @return Size in bytes def size_human_2_size_bytes(size_human): """ Convert human readable size string into integer size in bytes. Following rules of IEC binary prefixes on size: http://en.wikipedia.org/wiki/Gibibyte Supported input size_human in these formats: '1.9KiB' # int(1024*1.9) '1 KiB' # 2**10 '1B' # 1 '2K' # 2*(2**10), treated as '2KiB' '2k' # 2*(2**10), treated as '2KiB' '2KB' # 2*(10**3) """ regex_size_human = re.compile(r""" ^ ([0-9\.]+) # 1: number [ \t]* # might have space between number and unit ([a-zA-Z]*) # 2: units $ """, re.X) regex_match = regex_size_human.match(size_human) units = '' number = 0 size_bytes = 0 if regex_match: number = regex_match.group(1) units = regex_match.group(2) if not units: return int(number) units = units.upper() units = units.replace('IB', 'iB') if units in SIZE_CONS: size_bytes = SIZE_CONS[units] * float(number) return int(size_bytes) ## Common method used to parse a URI. # @param uri The uri to parse # @param requires Optional list of keys that must be present in output # @param required_params Optional list of required parameters that # must be present. # @return A hash of the parsed values. 
def uri_parse(uri, requires=None, required_params=None):
    """
    Common uri parse method that optionally can check for what is needed
    before returning successfully.

    @param uri              The uri string to parse
    @param requires         Optional list of keys that must be present in
                            the returned hash (scheme/netloc/port/host/...)
    @param required_params  Optional list of query-string parameter names
                            that must be present
    @return Hash of the parsed values
    @raise LsmError(PLUGIN_BUG) when a required key or parameter is missing
    """
    rc = {}
    u = urlparse.urlparse(uri)

    if u.scheme:
        rc['scheme'] = u.scheme
    if u.netloc:
        rc['netloc'] = u.netloc
    if u.port:
        rc['port'] = u.port
    if u.hostname:
        rc['host'] = u.hostname
    if u.username:
        rc['username'] = u.username
    else:
        # Explicit None so callers can test rc['username'] without KeyError.
        rc['username'] = None

    rc['parameters'] = uri_parameters(u)

    if requires:
        for r in requires:
            if r not in rc:
                raise LsmError(ErrorNumber.PLUGIN_BUG,
                               'uri missing \"%s\" or is in invalid form' % r)

    if required_params:
        for r in required_params:
            if r not in rc['parameters']:
                raise LsmError(ErrorNumber.PLUGIN_BUG,
                               'uri missing query parameter %s' % r)
    return rc


## Parses the parameters (Query string) of the URI
# @param uri Full uri
# @returns hash of the query string parameters.
def uri_parameters(uri):
    """
    Extract the query-string parameters of a parsed URI as a dict.

    @param uri  A ParseResult (result of urlparse.urlparse)
    @return dict of parameter name -> value, {} when no query string
    """
    # workaround for python bug:
    # http://bugs.python.org/issue9374
    # for URL: smispy+ssl://admin@emc-smi:5989?namespace=root/emc
    # Before the patch commited( RHEL 6 and Fedora 18- ):
    #   '?namespace=root/emc' is saved in uri.path
    # After patched(RHEL 7 and Fedora 19+):
    #   'namespace=root/emc' is saved in uri.query
    query = ''
    if uri.query:
        query = uri.query
    elif uri.path:
        # uri[2] is the path component of the ParseResult tuple.
        query = urlparse.urlparse('http:' + uri[2]).query
    else:
        return {}
    if query:
        # Split on the FIRST '=' only (maxsplit=1) so parameter values
        # that themselves contain '=' (e.g. base64 padding, sub-paths)
        # no longer raise "too many values to unpack".
        return dict([part.split('=', 1) for part in query.split('&')])
    else:
        return {}


## Generates the md5 hex digest of passed in parameter.
# @param t Item to generate signature on.
# @returns md5 hex digest.
def md5(t):
    """
    Return the md5 hex digest of t.

    NOTE(review): hashlib requires bytes on Python 3; this code base is
    Python 2 era where str is acceptable.
    """
    h = hashlib.md5()
    h.update(t)
    return h.hexdigest()


## Converts a list of arguments to string.
# @param args Args to join
# @return string of arguments joined together.
def params_to_string(*args):
    """
    Concatenate the str() of every argument into one string.
    """
    return ''.join([str(e) for e in args])

# Unfortunately the process name remains as 'python' so we are using argv[0]
# in the output to allow us to determine which python exe is indeed logging to
# syslog.
# TODO: On newer versions of python this is no longer true, need to fix.

## Posts a message to the syslogger.
# @param level Logging level
# @param prg Program name
# @param msg Message to log.
def post_msg(level, prg, msg):
    """
    If a message includes new lines we will create multiple syslog entries
    so that the message is readable.

    Otherwise it isn't very readable.  Hopefully we won't be logging much :-)
    """
    for l in msg.split('\n'):
        # Skip empty lines so we don't emit blank syslog entries.
        if len(l):
            syslog.syslog(level, prg + ": " + l)


def error(*msg):
    """
    Log msg at LOG_ERR level, prefixed with the current executable name.
    """
    post_msg(syslog.LOG_ERR, os.path.basename(sys.argv[0]),
             params_to_string(*msg))


def info(*msg):
    """
    Log msg at LOG_INFO level; no-op unless LOG_VERBOSE is set.
    """
    if LOG_VERBOSE:
        post_msg(syslog.LOG_INFO, os.path.basename(sys.argv[0]),
                 params_to_string(*msg))


class SocketEOF(Exception):
    """
    Exception class to indicate when we read zero bytes from a socket.
    """
    pass


@default_property('code', 'Error code')
@default_property('msg', 'Error message')
@default_property('data', 'Optional error data')
class LsmError(Exception):
    def __init__(self, code, message, data=None, *args, **kwargs):
        """
        Class represents an error.

        code:    one of the ErrorNumber constants
        message: human readable error text
        data:    optional extra diagnostic payload
        """
        Exception.__init__(self, *args, **kwargs)
        self._code = code
        self._msg = message
        self._data = data

    def __str__(self):
        # Render as "NAME(code): message [Data: ...]".
        error_no_str = ErrorNumber.error_number_to_str(self.code)
        if self.data is not None and self.data:
            return "%s: %s Data: %s" % \
                (error_no_str, self.msg, self.data)
        else:
            return "%s: %s " % (error_no_str, self.msg)


def addl_error_data(domain, level, exception, debug=None, debug_data=None):
    """
    Used for gathering additional information about an error.
    """
    return {'domain': domain, 'level': level, 'exception': exception,
            'debug': debug, 'debug_data': debug_data}


def get_class(class_name):
    """
    Given a class name it returns the class, caller will then
    need to run the constructor to create.

    class_name may be dotted ('pkg.mod.Cls'); a bare name is looked up
    in __main__.
    """
    parts = class_name.split('.')
    module = ".".join(parts[:-1])
    if len(module):
        # __import__ returns the top-level package; walk the remaining
        # attribute path down to the class itself.
        m = __import__(module)
        for comp in parts[1:]:
            m = getattr(m, comp)
    else:
        m = __import__('__main__')
        m = getattr(m, class_name)
    return m


#Note: Some of these don't make sense for python, but they do for other
#Languages so we will be keeping them consistent even though we won't be
#using them.
class ErrorNumber(object):
    # Error codes shared across all libstoragemgmt language bindings.
    OK = 0
    LIB_BUG = 1
    PLUGIN_BUG = 2
    JOB_STARTED = 7
    TIMEOUT = 11
    DAEMON_NOT_RUNNING = 12

    NAME_CONFLICT = 50
    EXISTS_INITIATOR = 52

    INVALID_ARGUMENT = 101

    NO_STATE_CHANGE = 125

    NETWORK_CONNREFUSED = 140   # Host on network, but connection refused
    NETWORK_HOSTDOWN = 141      # Host unreachable on network
    NETWORK_ERROR = 142         # Generic network error

    NO_MEMORY = 152
    NO_SUPPORT = 153

    # Deletion related errors
    IS_MASKED = 160     # Volume is masked to access group.

    NOT_FOUND_ACCESS_GROUP = 200
    NOT_FOUND_FS = 201
    NOT_FOUND_JOB = 202
    NOT_FOUND_POOL = 203
    NOT_FOUND_FS_SS = 204
    NOT_FOUND_VOLUME = 205
    NOT_FOUND_NFS_EXPORT = 206
    NOT_FOUND_SYSTEM = 208
    NOT_FOUND_DISK = 209

    NOT_LICENSED = 226

    NO_SUPPORT_ONLINE_CHANGE = 250
    NO_SUPPORT_OFFLINE_CHANGE = 251

    PLUGIN_AUTH_FAILED = 300    # Client supplied credential are incorrect

    PLUGIN_IPC_FAIL = 301       # Inter-process communication between client &
                                # out of process plug-in encountered connection
                                # errors.

    PLUGIN_SOCKET_PERMISSION = 307      # Incorrect permission on UNIX domain
                                        # socket used for IPC

    PLUGIN_NOT_EXIST = 311

    NOT_ENOUGH_SPACE = 350

    TRANSPORT_COMMUNICATION = 400
    TRANSPORT_SERIALIZATION = 401
    TRANSPORT_INVALID_ARG = 402

    LAST_INIT_IN_ACCESS_GROUP = 502     # refuse to remove the last initiator
                                        # from access group

    UNSUPPORTED_SEARCH_KEY = 510

    EMPTY_ACCESS_GROUP = 511    # volume_mask() will fail if access group
                                # has no member/initiator.
    POOL_NOT_READY = 512        # Pool is not ready for create/resize/etc

    DISK_NOT_FREE = 513     # Disk is not in DISK.STATUS_FREE status.
    # Snapshot of the class namespace taken at class-body execution time;
    # used below to map numeric codes back to their constant names.
    _LOCALS = locals()

    @staticmethod
    def error_number_to_str(error_no):
        # Reverse lookup: numeric code -> "NAME(code)" string.
        for error_str in ErrorNumber._LOCALS.keys():
            if ErrorNumber._LOCALS[error_str] == error_no:
                return "%s(%d)" % (error_str, error_no)
        return "UNKNOWN_ERROR_NUMBER(%d)" % error_no


class JobStatus(object):
    # States of an asynchronous job.
    INPROGRESS = 1
    COMPLETE = 2
    ERROR = 3


def type_compare(method_name, exp_type, act_val):
    """
    Recursively verify that act_val matches the expected type description
    exp_type; raises TypeError on mismatch.  exp_type may itself be a
    sequence of types for sequence return values.
    """
    if isinstance(exp_type, collections.Sequence):
        if not isinstance(act_val, collections.Sequence):
            raise TypeError("%s call is returning a %s, but is "
                            "expecting a sequence" %
                            (method_name, str(type(act_val))))
        # If the list has only one expected value we will make sure all
        # elements in the list adhere to it, otherwise we will enforce a one
        # to one check against the expected types.
        if len(exp_type) == 1:
            for av in act_val:
                type_compare(method_name, exp_type[0], av)
        else:
            # Expect a 1-1 type match, extras get ignored at the moment
            for exp, act in zip(exp_type, act_val):
                type_compare(method_name, exp, act)
    else:
        # A number of times a method will return None or some valid type,
        # only check on the type if the value is not None
        if exp_type != type(act_val) and act_val is not None:
            # NOTE(review): 'unicode' makes this Python 2 only; a str is
            # accepted where unicode is expected.
            if (exp_type == unicode and type(act_val) == str):
                return
            if not inspect.isclass(exp_type) or \
                    not issubclass(type(act_val), exp_type):
                raise TypeError('%s call expected: %s got: %s ' %
                                (method_name, str(exp_type),
                                 str(type(act_val))))


def return_requires(*types):
    """
    Decorator function that allows us to ensure that we are getting the
    correct types back from a function/method call.

    Note: This is normally frowned upon by the python community, but this
    API needs to be language agnostic, so making sure we have the correct
    types is quite important.
    """
    def outer(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            r = func(*args, **kwargs)

            # In this case the user did something like
            # @return_requires(int, string, int)
            # in this case we require that all the args are present.
            if len(types) > 1:
                # Multiple expected types: the call must return exactly
                # one value per declared type.
                if len(r) != len(types):
                    raise TypeError("%s call expected %d "
                                    "return values, actual = %d" %
                                    (func.__name__, len(types), len(r)))
                type_compare(func.__name__, types, r)
            elif len(types) == 1:
                # We have one return type (but it could be a sequence)
                type_compare(func.__name__, types[0], r)

            return r
        return inner
    return outer


class TestCommon(unittest.TestCase):
    """
    Unit tests for the helpers in this module (SocketEOF, LsmError,
    addl_error_data).
    """
    def setUp(self):
        pass

    def test_simple(self):

        try:
            raise SocketEOF()
        except SocketEOF as e:
            self.assertTrue(isinstance(e, SocketEOF))

        try:
            raise LsmError(10, 'Message', 'Data')
        except LsmError as e:
            self.assertTrue(e.code == 10 and e.msg == 'Message'
                            and e.data == 'Data')

        ed = addl_error_data('domain', 'level', 'exception', 'debug',
                             'debug_data')
        self.assertTrue(ed['domain'] == 'domain' and ed['level'] == 'level'
                        and ed['debug'] == 'debug'
                        and ed['exception'] == 'exception'
                        and ed['debug_data'] == 'debug_data')

    def tearDown(self):
        pass

if __name__ == '__main__':
    unittest.main()
libstoragemgmt-1.2.3/python_binding/lsm/external/0000775000175000017500000000000012542455463017202 500000000000000libstoragemgmt-1.2.3/python_binding/lsm/external/__init__.py0000664000175000017500000000000012537546123021217 00000000000000libstoragemgmt-1.2.3/python_binding/lsm/external/xmltodict.py0000664000175000017500000001054012537546123021501 00000000000000# This code taken from recipe
# http://code.activestate.com/recipes/
# 573463-converting-xml-to-dictionary-and-back/
# Modified slightly to remove namespace and number of other small details
# Licensed: PSF

from xml.etree import ElementTree


def _ns(tag):
    # Strip a '{namespace}' prefix from an ElementTree tag, if present.
    # (find returns -1 when absent, so +1 yields the whole tag unchanged.)
    return tag[tag.find('}') + 1:]


class XmlDictObject(dict):
    """
    Adds object like functionality to the standard dictionary.
""" def __init__(self, initdict=None): if initdict is None: initdict = {} dict.__init__(self, initdict) def __getattr__(self, item): return self.__getitem__(item) def __setattr__(self, item, value): self.__setitem__(item, value) def __str__(self): if '_text' in self: return self.__getitem__('_text') else: return '' @staticmethod def wrap(x): """ Static method to wrap a dictionary recursively as an XmlDictObject """ if isinstance(x, dict): return XmlDictObject( (k, XmlDictObject.wrap(v)) for (k, v) in x.iteritems()) elif isinstance(x, list): return [XmlDictObject.wrap(v) for v in x] else: return x @staticmethod def _un_wrap(x): if isinstance(x, dict): return dict( (k, XmlDictObject._un_wrap(v)) for (k, v) in x.iteritems()) elif isinstance(x, list): return [XmlDictObject._un_wrap(v) for v in x] else: return x def un_wrap(self): """ Recursively converts an XmlDictObject to a standard dictionary and returns the result. """ return XmlDictObject._un_wrap(self) def _convert_dict_to_xml_recurse(parent, dictitem): assert isinstance(dictitem, dict) if isinstance(dictitem, dict): for (tag, child) in dictitem.iteritems(): if str(tag) == '_text': parent.text = str(child) elif isinstance(child, list): # iterate through the array and convert for listchild in child: elem = ElementTree.Element(tag) parent.append(elem) _convert_dict_to_xml_recurse(elem, listchild) else: elem = ElementTree.Element(tag) parent.append(elem) _convert_dict_to_xml_recurse(elem, child) else: parent.text = str(dictitem) def convert_dict_to_xml(xmldict): """ Converts a dictionary to an XML ElementTree Element """ roottag = xmldict.keys()[0] root = ElementTree.Element(roottag) _convert_dict_to_xml_recurse(root, xmldict[roottag]) return root def _convert_xml_to_dict_recurse(node, dictclass): nodedict = dictclass() if len(node.items()) > 0: # if we have attributes, set them if'attrib' in nodedict: nodedict['attrib'].update(dict(node.items())) else: nodedict['attrib'] = {} 
nodedict['attrib'].update(dict(node.items())) #We get a collision so attributes get their own hash! #nodedict.update(dict(node.items())) for child in node: # recursively add the element's children newitem = _convert_xml_to_dict_recurse(child, dictclass) if _ns(child.tag) in nodedict: # found duplicate tag, force a list if isinstance(nodedict[_ns(child.tag)], list): # append to existing list nodedict[_ns(child.tag)].append(newitem) else: # convert to list nodedict[_ns(child.tag)] = [nodedict[_ns(child.tag)], newitem] else: # only one, directly set the dictionary nodedict[_ns(child.tag)] = newitem if node.text is None: text = None else: text = node.text.strip() if len(nodedict) > 0: # if we have a dictionary add the text as a dictionary value # (if there is any) if text is not None and len(text) > 0: nodedict['_text'] = text else: # if we don't have child nodes or attributes, just set the text nodedict = text return nodedict def convert_xml_to_dict(root, dictclass=XmlDictObject): """ Converts an ElementTree Element to a dictionary """ return dictclass( {_ns(root.tag): _convert_xml_to_dict_recurse(root, dictclass)}) libstoragemgmt-1.2.3/python_binding/lsm/_transport.py0000664000175000017500000002011412537737032020042 00000000000000# Copyright (C) 2011-2013 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . 
# # Author: tasleson import json import socket import string import os from _common import SocketEOF as _SocketEOF from _common import LsmError, ErrorNumber from _data import DataDecoder as _DataDecoder, DataEncoder as _DataEncoder import unittest import threading class TransPort(object): """ Provides wire serialization by using json. Loosely conforms to json-rpc, however a length header was added so that we would have the ability to use non sax like json parsers, which are more abundant. = 1) for i in wire: self.c.send(i) reply, msg_id = self.client.read_resp() self.assertTrue(payload == reply) def tearDown(self): self.client.send_req("done", None) resp, msg_id = self.client.read_resp() self.assertTrue(resp is None) self.server.join() if __name__ == "__main__": unittest.main() libstoragemgmt-1.2.3/python_binding/lsm/_pluginrunner.py0000664000175000017500000001242712537737032020546 00000000000000# Copyright (C) 2011-2013 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: tasleson import socket import traceback import sys from _common import SocketEOF as _SocketEOF from lsm import LsmError, error, ErrorNumber import _transport from lsm.lsmcli import cmd_line_wrapper def search_property(lsm_objs, search_key, search_value): """ This method does not check whether lsm_obj contain requested property. The method caller should do the check. 
""" if search_key is None: return lsm_objs return list(lsm_obj for lsm_obj in lsm_objs if getattr(lsm_obj, search_key) == search_value) class PluginRunner(object): """ Plug-in side common code which uses the passed in plugin to do meaningful work. """ @staticmethod def _is_number(val): """ Returns True if val is an integer. """ try: int(val) return True except ValueError: return False def __init__(self, plugin, args): self.cmdline = False if len(args) == 2 and PluginRunner._is_number(args[1]): try: fd = int(args[1]) self.tp = _transport.TransPort( socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)) #At this point we can return errors to the client, so we can #inform the client if the plug-in fails to create itself try: self.plugin = plugin() except Exception as e: exception_info = sys.exc_info() self.tp.send_error(0, -32099, 'Error instantiating plug-in ' + str(e)) raise exception_info[1], None, exception_info[2] except Exception: error(traceback.format_exc()) error('Plug-in exiting.') sys.exit(2) else: self.cmdline = True cmd_line_wrapper(plugin) def run(self): #Don't need to invoke this when running stand alone as a cmdline if self.cmdline: return need_shutdown = False msg_id = 0 try: while True: try: #result = None msg = self.tp.read_req() method = msg['method'] msg_id = msg['id'] params = msg['params'] #Check to see if this plug-in implements this operation #if not return the expected error. 
                    if hasattr(self.plugin, method):
                        if params is None:
                            result = getattr(self.plugin, method)()
                        else:
                            # Keyword-expand the json-rpc params dict.
                            result = getattr(self.plugin, method)(
                                **msg['params'])
                    else:
                        raise LsmError(ErrorNumber.NO_SUPPORT,
                                       "Unsupported operation")

                    self.tp.send_resp(result)

                    if method == 'plugin_register':
                        need_shutdown = True

                    if method == 'plugin_unregister':
                        #This is a graceful plugin_unregister
                        need_shutdown = False
                        self.tp.close()
                        break

                except ValueError as ve:
                    # -32700: json-rpc "parse error"
                    error(traceback.format_exc())
                    self.tp.send_error(msg_id, -32700, str(ve))
                except AttributeError as ae:
                    # -32601: json-rpc "method not found"
                    error(traceback.format_exc())
                    self.tp.send_error(msg_id, -32601, str(ae))
                except LsmError as lsm_err:
                    # Plug-in raised a well-formed error: report it and
                    # keep servicing requests.
                    self.tp.send_error(msg_id, lsm_err.code, lsm_err.msg,
                                       lsm_err.data)

        except _SocketEOF:
            #Client went away and didn't meet our expectations for protocol,
            #this error message should not be seen as it shouldn't be occuring.
            if need_shutdown:
                error('Client went away, exiting plug-in')
        except Exception:
            error("Unhandled exception in plug-in!\n" + traceback.format_exc())

            try:
                self.tp.send_error(msg_id, ErrorNumber.PLUGIN_BUG,
                                   "Unhandled exception in plug-in",
                                   str(traceback.format_exc()))
            except Exception:
                # Best effort only; transport may already be gone.
                pass

        finally:
            if need_shutdown:
                #Client wasn't nice, we will allow plug-in to cleanup
                self.plugin.plugin_unregister()
                sys.exit(2)
libstoragemgmt-1.2.3/python_binding/lsm/__init__.py0000664000175000017500000000106612537546123017412 00000000000000__all__ = []

from version import VERSION
from _common import error, info, LsmError, ErrorNumber, \
    JobStatus, uri_parse, md5, Proxy, size_bytes_2_size_human, \
    common_urllib2_error_handler, size_human_2_size_bytes
from _data import (Disk, Volume, Pool, System, FileSystem, FsSnapshot,
                   NfsExport, BlockRange, AccessGroup, TargetPort,
                   Capabilities)
from _iplugin import IPlugin, IStorageAreaNetwork, INetworkAttachedStorage, \
    INfs

from _client import Client
from _pluginrunner import PluginRunner, search_property
libstoragemgmt-1.2.3/python_binding/lsm/_data.py0000664000175000017500000006667312537737032016733 00000000000000# Copyright (C) 2011-2014 Red Hat, Inc.
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; If not, see <http://www.gnu.org/licenses/>.
#
# Author: tasleson
#         Gris Ge

from abc import ABCMeta as _ABCMeta
import re

# Prefer simplejson for speed when available; fall back to stdlib json.
try:
    import simplejson as json
except ImportError:
    import json

from json.decoder import WHITESPACE

from _common import get_class, default_property, ErrorNumber, LsmError


class DataEncoder(json.JSONEncoder):
    """
    Custom json encoder for objects derived form ILsmData
    """
    def default(self, my_class):
        if not isinstance(my_class, IData):
            raise ValueError('incorrect class type:' + str(type(my_class)))
        else:
            return my_class._to_dict()


class DataDecoder(json.JSONDecoder):
    """
    Custom json decoder for objects derived from ILsmData
    """
    @staticmethod
    def __process_dict(d):
        """
        Processes a dictionary
        """
        rc = {}

        # Dicts carrying a 'class' key are serialized IData objects.
        if 'class' in d:
            rc = IData._factory(d)
        else:
            for (k, v) in d.iteritems():
                rc[k] = DataDecoder.__decode(v)
        return rc

    @staticmethod
    def __process_list(l):
        """
        Processes a list
        """
        rc = []
        for elem, value in enumerate(l):
            if type(value) is list:
                rc.append(DataDecoder.__process_list(value))
            elif type(value) is dict:
                rc.append(DataDecoder.__process_dict(value))
            else:
                rc.append(value)
        return rc

    @staticmethod
    def __decode(e):
        """
        Decodes the parsed json
        """
        if type(e) is dict:
            return DataDecoder.__process_dict(e)
        elif type(e) is list:
            return DataDecoder.__process_list(e)
        else:
            return e

    def decode(self, json_string, _w=WHITESPACE.match):
        return DataDecoder.__decode(json.loads(json_string))


class IData(object):
    """
    Base class functionality of serializable
    classes.
    """
    __metaclass__ = _ABCMeta

    def _to_dict(self):
        """
        Represent the class as a dictionary
        """
        rc = {'class': self.__class__.__name__}

        #If one of the attributes is another IData we will
        #process that too, is there a better way to handle this?
        # k[1:] strips the leading '_' from private attribute names.
        for (k, v) in self.__dict__.items():
            if isinstance(v, IData):
                rc[k[1:]] = v._to_dict()
            else:
                rc[k[1:]] = v

        return rc

    @staticmethod
    def _factory(d):
        """
        Factory for creating the appropriate class given a dictionary.
        This only works for objects that inherit from IData
        """
        if 'class' in d:
            class_name = d['class']
            del d['class']
            c = get_class(__name__ + '.' + class_name)

            #If any of the parameters are themselves an IData process them
            # Keys are re-prefixed with '_' to match constructor parameters.
            for k, v in d.items():
                if isinstance(v, dict) and 'class' in v:
                    d['_' + k] = IData._factory(d.pop(k))
                else:
                    d['_' + k] = d.pop(k)

            return c(**d)

    def __str__(self):
        """
        Used for human string representation.
        """
        return str(self._to_dict())


@default_property('id', doc="Unique identifier")
@default_property('name', doc="Disk name (aka. vendor)")
@default_property('disk_type', doc="Enumerated type of disk")
@default_property('block_size', doc="Size of each block")
@default_property('num_of_blocks', doc="Total number of blocks")
@default_property('status', doc="Enumerated status")
@default_property('system_id', doc="System identifier")
@default_property("plugin_data", doc="Private plugin data")
class Disk(IData):
    """
    Represents a disk.
    """
    SUPPORTED_SEARCH_KEYS = ['id', 'system_id']

    # We use '-1' to indicate we failed to get the requested number.
    # For example, when block found is undetectable, we use '-1' instead of
    # confusing 0.
    BLOCK_COUNT_NOT_FOUND = -1
    BLOCK_SIZE_NOT_FOUND = -1

    TYPE_UNKNOWN = 0
    TYPE_OTHER = 1
    TYPE_ATA = 3    # IDE disk which is seldomly used.
    TYPE_SATA = 4
    TYPE_SAS = 5
    TYPE_FC = 6
    TYPE_SOP = 7    # SCSI over PCIe(SSD)
    TYPE_SCSI = 8
    TYPE_LUN = 9    # Remote LUN was treated as a disk.

    # Due to complesity of disk types, we are defining these beside DMTF
    # standards:
    TYPE_NL_SAS = 51    # Near-Line SAS==SATA disk + SAS port.

    # in DMTF CIM 2.34.0+ CIM_DiskDrive['DiskType'], they also defined
    # SSD and HYBRID disk type. We use it as faillback.
    TYPE_HDD = 52       # Normal HDD
    TYPE_SSD = 53       # Solid State Drive
    TYPE_HYBRID = 54    # uses a combination of HDD and SSD

    # Disk status is a bit field; values may be combined.
    STATUS_UNKNOWN = 1 << 0
    STATUS_OK = 1 << 1
    STATUS_OTHER = 1 << 2
    STATUS_PREDICTIVE_FAILURE = 1 << 3
    STATUS_ERROR = 1 << 4
    STATUS_REMOVED = 1 << 5
    STATUS_STARTING = 1 << 6
    STATUS_STOPPING = 1 << 7
    STATUS_STOPPED = 1 << 8
    STATUS_INITIALIZING = 1 << 9
    STATUS_MAINTENANCE_MODE = 1 << 10
    # In maintenance for bad sector scan, integerity check and etc
    # It might be combined with STATUS_OK or
    # STATUS_STOPPED for online maintenance or offline maintenance.
    STATUS_SPARE_DISK = 1 << 11
    # Indicate disk is a spare disk.
    STATUS_RECONSTRUCT = 1 << 12
    # Indicate disk is reconstructing data.
    STATUS_FREE = 1 << 13
    # New in version 1.2, indicate the whole disk is not holding any data or
    # acting as a dedicate spare disk.
    # This disk could be assigned as a dedicated spare disk or used for
    # creating pool.
    # If any spare disk(like those on NetApp ONTAP) does not require
    # any explicit action when assigning to pool, it should be treated as
    # free disk and marked as STATUS_FREE|STATUS_SPARE_DISK.

    def __init__(self, _id, _name, _disk_type, _block_size, _num_of_blocks,
                 _status, _system_id, _plugin_data=None):
        self._id = _id
        self._name = _name
        self._disk_type = _disk_type
        self._block_size = _block_size
        self._num_of_blocks = _num_of_blocks
        self._status = _status
        self._system_id = _system_id
        self._plugin_data = _plugin_data

    @property
    def size_bytes(self):
        """
        Disk size in bytes.
        """
        return self.block_size * self.num_of_blocks

    def __str__(self):
        return self.name


# Lets do this once outside of the class to minimize the number of
# times it needs to be compiled.
_vol_regex_vpd83 = re.compile('(?:^6[0-9a-f]{31})|(?:^[235][0-9a-f]{15})$')


@default_property('id', doc="Unique identifier")
@default_property('name', doc="User given name")
@default_property('vpd83', doc="Vital product page 0x83 identifier")
@default_property('block_size', doc="Volume block size")
@default_property('num_of_blocks', doc="Number of blocks")
@default_property('admin_state', doc="Enabled or disabled by administrator")
@default_property('system_id', doc="System identifier")
@default_property('pool_id', doc="Pool identifier")
@default_property("plugin_data", doc="Private plugin data")
class Volume(IData):
    """
    Represents a volume.
    """
    SUPPORTED_SEARCH_KEYS = ['id', 'system_id', 'pool_id']

    #Replication types
    REPLICATE_UNKNOWN = -1
    REPLICATE_CLONE = 2
    REPLICATE_COPY = 3
    REPLICATE_MIRROR_SYNC = 4
    REPLICATE_MIRROR_ASYNC = 5

    #Provisioning types
    PROVISION_UNKNOWN = -1
    PROVISION_THIN = 1
    PROVISION_FULL = 2
    PROVISION_DEFAULT = 3

    ADMIN_STATE_DISABLED = 0
    ADMIN_STATE_ENABLED = 1

    RAID_TYPE_UNKNOWN = -1
    # The plugin failed to detect the volume's RAID type.
    RAID_TYPE_RAID0 = 0     # Stripe
    RAID_TYPE_RAID1 = 1
    # Mirror for two disks. For 4 disks or more, they are RAID10.
    RAID_TYPE_RAID3 = 3
    # Byte-level striping with dedicated parity
    RAID_TYPE_RAID4 = 4
    # Block-level striping with dedicated parity
    RAID_TYPE_RAID5 = 5
    # Block-level striping with distributed parity
    RAID_TYPE_RAID6 = 6
    # Block-level striping with two distributed parities, aka, RAID-DP
    RAID_TYPE_RAID10 = 10   # Stripe of mirrors
    RAID_TYPE_RAID15 = 15   # Parity of mirrors
    RAID_TYPE_RAID16 = 16   # Dual parity of mirrors
    RAID_TYPE_RAID50 = 50   # Stripe of parities
    RAID_TYPE_RAID60 = 60   # Stripe of dual parities
    RAID_TYPE_RAID51 = 51   # Mirror of parities
    RAID_TYPE_RAID61 = 61   # Mirror of dual parities
    RAID_TYPE_JBOD = 20
    # Just bunch of disks, no parity, no striping.
    RAID_TYPE_MIXED = 21
    # This volume contains multiple RAID settings.
    RAID_TYPE_OTHER = 22    # Vendor specific RAID type

    STRIP_SIZE_UNKNOWN = 0
    DISK_COUNT_UNKNOWN = 0
    MIN_IO_SIZE_UNKNOWN = 0
    OPT_IO_SIZE_UNKNOWN = 0

    VCR_STRIP_SIZE_DEFAULT = 0

    def __init__(self, _id, _name, _vpd83, _block_size, _num_of_blocks,
                 _admin_state, _system_id, _pool_id, _plugin_data=None):
        self._id = _id                        # Identifier
        self._name = _name                    # Human recognisable name
        # Validate the SCSI VPD 0x83 identifier up-front so a malformed id
        # is rejected at construction time rather than surfacing later.
        if _vpd83 and not Volume.vpd83_verify(_vpd83):
            raise LsmError(
                ErrorNumber.INVALID_ARGUMENT,
                "Incorrect format of VPD 0x83 NAA(3) string: '%s', "
                "expecting 32 or 16 lower case hex characters" % _vpd83)
        self._vpd83 = _vpd83                  # SCSI page 83 unique ID
        self._block_size = _block_size        # Block size
        self._num_of_blocks = _num_of_blocks  # Number of blocks
        self._admin_state = _admin_state      # enable or disabled by admin
        self._system_id = _system_id          # System id this volume belongs
        self._pool_id = _pool_id              # Pool id this volume belongs
        self._plugin_data = _plugin_data

    @property
    def size_bytes(self):
        """
        Volume size in bytes.
        """
        return self.block_size * self.num_of_blocks

    def __str__(self):
        return self.name

    @staticmethod
    def vpd83_verify(vpd):
        """
        Returns True if string is valid vpd 0x83 representation
        """
        if vpd and _vol_regex_vpd83.match(vpd):
            return True
        return False


@default_property('id', doc="Unique identifier")
@default_property('name', doc="User defined system name")
@default_property('status', doc="Enumerated status of system")
@default_property('status_info', doc="Detail status information of system")
@default_property("plugin_data", doc="Private plugin data")
class System(IData):
    """
    Represents a storage system (array, appliance, etc.).
    """
    # Status bit field values.
    STATUS_UNKNOWN = 1 << 0
    STATUS_OK = 1 << 1
    STATUS_ERROR = 1 << 2
    STATUS_DEGRADED = 1 << 3
    STATUS_PREDICTIVE_FAILURE = 1 << 4
    STATUS_OTHER = 1 << 5

    def __init__(self, _id, _name, _status, _status_info, _plugin_data=None):
        self._id = _id
        self._name = _name
        self._status = _status
        self._status_info = _status_info
        self._plugin_data = _plugin_data


@default_property('id', doc="Unique identifier")
@default_property('name', doc="User supplied name")
@default_property('total_space', doc="Total space in bytes")
@default_property('free_space', doc="Free space in bytes")
@default_property('status', doc="Enumerated status")
@default_property('status_info', doc="Text explaining status")
@default_property('system_id', doc="System identifier")
@default_property("plugin_data", doc="Plug-in private data")
@default_property("element_type", doc="What pool can be used for")
@default_property("unsupported_actions",
                  doc="What cannot be done with this pool")
class Pool(IData):
    """
    Pool specific information
    """
    SUPPORTED_SEARCH_KEYS = ['id', 'system_id']

    TOTAL_SPACE_NOT_FOUND = -1
    FREE_SPACE_NOT_FOUND = -1

    # Element Type indicate what kind of element could this pool create:
    #   * Another Pool
    #   * Volume (aka, LUN)
    #   * System Reserved Pool.
    ELEMENT_TYPE_POOL = 1 << 1
    ELEMENT_TYPE_VOLUME = 1 << 2
    ELEMENT_TYPE_FS = 1 << 3
    ELEMENT_TYPE_DELTA = 1 << 4
    ELEMENT_TYPE_VOLUME_FULL = 1 << 5
    ELEMENT_TYPE_VOLUME_THIN = 1 << 6
    ELEMENT_TYPE_SYS_RESERVED = 1 << 10     # Reserved for system use

    # Unsupported actions, what pool cannot be used for
    UNSUPPORTED_VOLUME_GROW = 1 << 0
    UNSUPPORTED_VOLUME_SHRINK = 1 << 1

    # Pool status could be any combination of these status.
    STATUS_UNKNOWN = 1 << 0
    STATUS_OK = 1 << 1
    STATUS_OTHER = 1 << 2
    STATUS_DEGRADED = 1 << 4
    STATUS_ERROR = 1 << 5
    STATUS_STOPPED = 1 << 9
    STATUS_RECONSTRUCTING = 1 << 12
    STATUS_VERIFYING = 1 << 13
    STATUS_INITIALIZING = 1 << 14
    STATUS_GROWING = 1 << 15

    MEMBER_TYPE_UNKNOWN = 0
    MEMBER_TYPE_OTHER = 1
    MEMBER_TYPE_DISK = 2
    MEMBER_TYPE_POOL = 3

    def __init__(self, _id, _name, _element_type, _unsupported_actions,
                 _total_space, _free_space,
                 _status, _status_info, _system_id, _plugin_data=None):
        self._id = _id                      # Identifier
        self._name = _name                  # Human recognisable name
        self._element_type = _element_type  # What pool can be used to create
        self._unsupported_actions = _unsupported_actions    # What pool cannot
                                                            # be used for
        self._total_space = _total_space    # Total size
        self._free_space = _free_space      # Free space available
        self._status = _status              # Status of pool.
        self._status_info = _status_info    # Additional status text of pool
        self._system_id = _system_id        # System id this pool belongs
        self._plugin_data = _plugin_data    # Plugin private data


@default_property('id', doc="Unique identifier")
@default_property('name', doc="File system name")
@default_property('total_space', doc="Total space in bytes")
@default_property('free_space', doc="Free space available")
@default_property('pool_id', doc="What pool the file system resides on")
@default_property('system_id', doc="System ID")
@default_property("plugin_data", "Private plugin data")
class FileSystem(IData):
    """
    Represents a file system.
    """
    SUPPORTED_SEARCH_KEYS = ['id', 'system_id', 'pool_id']

    def __init__(self, _id, _name, _total_space, _free_space, _pool_id,
                 _system_id, _plugin_data=None):
        self._id = _id
        self._name = _name
        self._total_space = _total_space
        self._free_space = _free_space
        self._pool_id = _pool_id
        self._system_id = _system_id
        self._plugin_data = _plugin_data


@default_property('id', doc="Unique identifier")
@default_property('name', doc="Snapshot name")
@default_property('ts', doc="Time stamp the snapshot was created")
@default_property("plugin_data", "Private plugin data")
class FsSnapshot(IData):
    """
    Represents a file system snapshot.
    """
    def __init__(self, _id, _name, _ts, _plugin_data=None):
        self._id = _id
        self._name = _name
        # Timestamp is normalized to int (seconds).
        self._ts = int(_ts)
        self._plugin_data = _plugin_data


@default_property('id', doc="Unique identifier")
@default_property('fs_id', doc="Filesystem that is exported")
@default_property('export_path', doc="Export path")
@default_property('auth', doc="Authentication type")
@default_property('root', doc="List of hosts with no_root_squash")
@default_property('rw', doc="List of hosts with Read & Write privileges")
@default_property('ro', doc="List of hosts with Read only privileges")
@default_property('anonuid', doc="UID for anonymous user id")
@default_property('anongid', doc="GID for anonymous group id")
@default_property('options', doc="String containing advanced options")
@default_property('plugin_data', doc="Plugin private data")
class NfsExport(IData):
    """
    Represents an NFS export of a file system.
    """
    SUPPORTED_SEARCH_KEYS = ['id', 'fs_id']
    ANON_UID_GID_NA = -1
    ANON_UID_GID_ERROR = -2

    def __init__(self, _id, _fs_id, _export_path, _auth, _root, _rw, _ro,
                 _anonuid, _anongid, _options, _plugin_data=None):
        # fs_id and export_path are mandatory for an export to make sense.
        assert (_fs_id is not None)
        assert (_export_path is not None)

        self._id = _id
        self._fs_id = _fs_id                # File system exported
        self._export_path = _export_path    # Export path
        self._auth = _auth                  # Authentication type
        self._root = _root                  # List of hosts with no_root_squash
        self._rw = _rw                      # List of hosts with read/write
        self._ro = _ro                      # List of hosts with read/only
        self._anonuid = _anonuid            # uid for anonymous user id
        self._anongid = _anongid            # gid for anonymous group id
        self._options = _options            # NFS options
        self._plugin_data = _plugin_data


@default_property('src_block', doc="Source logical block address")
@default_property('dest_block', doc="Destination logical block address")
@default_property('block_count', doc="Number of blocks")
class BlockRange(IData):
    """
    Represents a range of blocks for block-level replication.
    """
    def __init__(self, _src_block, _dest_block, _block_count):
        self._src_block = _src_block
        self._dest_block = _dest_block
        self._block_count = _block_count


@default_property('id', doc="Unique instance identifier")
@default_property('name', doc="Access group name")
@default_property('init_ids', doc="List of initiator IDs")
@default_property('init_type', doc="Initiator type")
@default_property('system_id', doc="System identifier")
@default_property('plugin_data', doc="Plugin private data")
class AccessGroup(IData):
    """
    Represents a group of initiators which volumes can be masked to.
    """
    SUPPORTED_SEARCH_KEYS = ['id', 'system_id']

    INIT_TYPE_UNKNOWN = 0
    INIT_TYPE_OTHER = 1
    INIT_TYPE_WWPN = 2
    INIT_TYPE_ISCSI_IQN = 5
    INIT_TYPE_ISCSI_WWPN_MIXED = 7

    def __init__(self, _id, _name, _init_ids, _init_type, _system_id,
                 _plugin_data=None):
        self._id = _id
        self._name = _name                  # AccessGroup name
        self._init_ids = AccessGroup._standardize_init_list(_init_ids)
        # A list of Initiator ID strings.
self._init_type = _init_type self._system_id = _system_id # System id this group belongs self._plugin_data = _plugin_data @staticmethod def _standardize_init_list(init_ids): rc = [] for i in init_ids: valid, init_type, init_id = AccessGroup.initiator_id_verify(i) if valid: rc.append(init_id) else: raise LsmError(LsmError.ErrorNumber.INVALID_ARGUMENT, "Invalid initiator ID %s" % i) return rc _regex_wwpn = re.compile(r""" ^(0x|0X)?([0-9A-Fa-f]{2}) (([\.:\-])?[0-9A-Fa-f]{2}){7}$ """, re.X) @staticmethod def initiator_id_verify(init_id, init_type=None, raise_exception=False): """ Public method which can be used to verify an initiator id :param init_id: :param init_type: :param raise_exception: Will throw a LsmError INVALID_ARGUMENT if not a valid initiator address :return:(Bool, init_type, init_id) Note: init_id will be returned in normalized format if it's a WWPN """ if init_id.startswith('iqn') or init_id.startswith('eui') or\ init_id.startswith('naa'): if init_type is None or \ init_type == AccessGroup.INIT_TYPE_ISCSI_IQN: return True, AccessGroup.INIT_TYPE_ISCSI_IQN, init_id if AccessGroup._regex_wwpn.match(str(init_id)): if init_type is None or \ init_type == AccessGroup.INIT_TYPE_WWPN: return (True, AccessGroup.INIT_TYPE_WWPN, AccessGroup._wwpn_to_lsm_type(init_id)) if raise_exception: raise LsmError(ErrorNumber.INVALID_ARGUMENT, "Initiator id '%s' is invalid" % init_id) return False, None, None @staticmethod def _wwpn_to_lsm_type(wwpn, raise_error=True): """ Conver provided WWPN string into LSM standarded one: LSM WWPN format: ^(?:[0-9a-f]{2}:){7}[0-9a-f]{2}$ LSM WWPN Example: 10:00:00:00:c9:95:2f:de Acceptable WWPN format is: ^[0x|0X]{0,1}(:?[0-9A-Fa-f]{2}[\.\-:]{0,1}){7}[0-9A-Fa-f]{2}$ Acceptable WWPN example: 10:00:00:00:c9:95:2f:de 10:00:00:00:C9:95:2F:DE 10-00-00-00-C9-95-2F-DE 10-00-00-00-c9-95-2f-de 10.00.00.00.C9.95.2F.DE 10.00.00.00.c9.95.2f.de 0x10000000c9952fde 0X10000000C9952FDE 10000000c9952fde 10000000C9952FDE Return the LSM WWPN Return None if 
raise_error is False and not a valid WWPN. """ if AccessGroup._regex_wwpn.match(str(wwpn)): s = str(wwpn) s = s.lower() s = re.sub(r'0x', '', s) s = re.sub(r'[^0-9a-f]', '', s) s = ":".join(re.findall(r'..', s)) return s if raise_error: raise LsmError(ErrorNumber.INVALID_ARGUMENT, "Invalid WWPN Initiator: %s" % wwpn) return None @default_property('id', doc="Unique instance identifier") @default_property('port_type', doc="Target port type") @default_property('service_address', doc="Target port service address") @default_property('network_address', doc="Target port network address") @default_property('physical_address', doc="Target port physical address") @default_property('physical_name', doc="Target port physical port name") @default_property('system_id', doc="System identifier") @default_property('plugin_data', doc="Plugin private data") class TargetPort(IData): SUPPORTED_SEARCH_KEYS = ['id', 'system_id'] TYPE_OTHER = 1 TYPE_FC = 2 TYPE_FCOE = 3 TYPE_ISCSI = 4 def __init__(self, _id, _port_type, _service_address, _network_address, _physical_address, _physical_name, _system_id, _plugin_data=None): self._id = _id self._port_type = _port_type self._service_address = _service_address # service_address: # The address used by upper layer like FC and iSCSI: # FC and FCoE: WWPN # iSCSI: IQN # String. Lower case, split with : every two digits if WWPN. self._network_address = _network_address # network_address: # The address used by network layer like FC and TCP/IP: # FC/FCoE: WWPN # iSCSI: IPv4:Port # [IPv6]:Port # String. Lower case, split with : every two digits if WWPN. self._physical_address = _physical_address # physical_address: # The address used by physical layer like FC-0 and MAC: # FC: WWPN # FCoE: WWPN # iSCSI: MAC # String. Lower case, split with : every two digits. self._physical_name = _physical_name # physical_name # The name of physical port. Administrator could use this name to # locate the port on storage system. # String. 
self._system_id = _system_id self._plugin_data = _plugin_data class Capabilities(IData): UNSUPPORTED = 0 SUPPORTED = 1 _NUM = 512 # Indicate the maximum capability integer _CAP_NUM_BEGIN = 20 # Indicate the first capability integer #Block operations VOLUMES = 20 VOLUME_CREATE = 21 VOLUME_RESIZE = 22 VOLUME_REPLICATE = 23 VOLUME_REPLICATE_CLONE = 24 VOLUME_REPLICATE_COPY = 25 VOLUME_REPLICATE_MIRROR_ASYNC = 26 VOLUME_REPLICATE_MIRROR_SYNC = 27 VOLUME_COPY_RANGE_BLOCK_SIZE = 28 VOLUME_COPY_RANGE = 29 VOLUME_COPY_RANGE_CLONE = 30 VOLUME_COPY_RANGE_COPY = 31 VOLUME_DELETE = 33 VOLUME_ENABLE = 34 VOLUME_DISABLE = 35 VOLUME_MASK = 36 VOLUME_UNMASK = 37 ACCESS_GROUPS = 38 ACCESS_GROUP_CREATE_WWPN = 39 ACCESS_GROUP_DELETE = 40 ACCESS_GROUP_INITIATOR_ADD_WWPN = 41 # For empty access group, this indicate it can add WWPN into it. ACCESS_GROUP_INITIATOR_DELETE = 42 VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP = 43 ACCESS_GROUPS_GRANTED_TO_VOLUME = 44 VOLUME_CHILD_DEPENDENCY = 45 VOLUME_CHILD_DEPENDENCY_RM = 46 ACCESS_GROUP_CREATE_ISCSI_IQN = 47 ACCESS_GROUP_INITIATOR_ADD_ISCSI_IQN = 48 # For empty access group, this indicate it can add iSCSI IQN into it. 
VOLUME_ISCSI_CHAP_AUTHENTICATION = 53 VOLUME_RAID_INFO = 54 VOLUME_THIN = 55 #File system FS = 100 FS_DELETE = 101 FS_RESIZE = 102 FS_CREATE = 103 FS_CLONE = 104 FILE_CLONE = 105 FS_SNAPSHOTS = 106 FS_SNAPSHOT_CREATE = 107 FS_SNAPSHOT_DELETE = 109 FS_SNAPSHOT_RESTORE = 110 FS_SNAPSHOT_RESTORE_SPECIFIC_FILES = 111 FS_CHILD_DEPENDENCY = 112 FS_CHILD_DEPENDENCY_RM = 113 FS_CHILD_DEPENDENCY_RM_SPECIFIC_FILES = 114 #NFS EXPORT_AUTH = 120 EXPORTS = 121 EXPORT_FS = 122 EXPORT_REMOVE = 123 EXPORT_CUSTOM_PATH = 124 POOLS_QUICK_SEARCH = 210 VOLUMES_QUICK_SEARCH = 211 DISKS_QUICK_SEARCH = 212 ACCESS_GROUPS_QUICK_SEARCH = 213 FS_QUICK_SEARCH = 214 NFS_EXPORTS_QUICK_SEARCH = 215 TARGET_PORTS = 216 TARGET_PORTS_QUICK_SEARCH = 217 DISKS = 220 POOL_MEMBER_INFO = 221 VOLUME_RAID_CREATE = 222 def _to_dict(self): return {'class': self.__class__.__name__, 'cap': ''.join(['%02x' % b for b in self._cap])} def __init__(self, _cap=None): if _cap is not None: self._cap = bytearray(_cap.decode('hex')) else: self._cap = bytearray(Capabilities._NUM) def supported(self, capability): return self.get(capability) == Capabilities.SUPPORTED def get(self, capability): if capability >= len(self._cap): return Capabilities.UNSUPPORTED return self._cap[capability] @staticmethod def _lsm_cap_to_str_dict(): """ Return a dict containing all valid capability: integer => string name """ lsm_cap_to_str_conv = dict() for c_str, c_int in Capabilities.__dict__.items(): if type(c_str) == str and type(c_int) == int and \ c_str[0] != '_' and \ Capabilities._CAP_NUM_BEGIN <= c_int <= Capabilities._NUM: lsm_cap_to_str_conv[c_int] = c_str return lsm_cap_to_str_conv def get_supported(self, all_cap=False): """ Returns a hash of the supported capabilities in the form constant, name """ all_caps = Capabilities._lsm_cap_to_str_dict() if all_cap: return all_caps rc = {} for i in all_caps.keys(): if self._cap[i] == Capabilities.SUPPORTED: if i in all_caps: rc[i] = all_caps[i] return rc def set(self, capability, 
value=SUPPORTED): self._cap[capability] = value def enable_all(self): for i in range(len(self._cap)): self._cap[i] = Capabilities.SUPPORTED if __name__ == '__main__': #TODO Need some unit tests that encode/decode all the types with nested pass libstoragemgmt-1.2.3/python_binding/lsm/version.py.in0000664000175000017500000000136012537737032017743 00000000000000# Copyright (C) 2011-2013 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: tasleson #To be filled in by autoconf VERSION = "@VERSION@" libstoragemgmt-1.2.3/python_binding/lsm/version.py0000664000175000017500000000135412542455451017337 00000000000000# Copyright (C) 2011-2013 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . 
# # Author: tasleson #To be filled in by autoconf VERSION = "1.2.3" libstoragemgmt-1.2.3/python_binding/lsm/_iplugin.py0000664000175000017500000004110612537737032017461 00000000000000# Copyright (C) 2011-2013 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: tasleson from abc import ABCMeta as _ABCMeta from abc import abstractmethod as _abstractmethod from lsm import LsmError, ErrorNumber class IPlugin(object): """ Plug-in interface that all plug-ins must implement for basic operation. """ __metaclass__ = _ABCMeta @_abstractmethod def plugin_register(self, uri, password, timeout, flags=0): """ Method first called to setup the plug-in (except for plugin_info) This would be the place to make a connection to the array. Returns None on success, else LsmError exception """ pass @_abstractmethod def time_out_set(self, ms, flags=0): """ Sets any time-outs for the plug-in (ms) Returns None on success, else LsmError exception """ pass @_abstractmethod def time_out_get(self, flags=0): """ Retrieves the current time-out Returns time-out in ms, else raise LsmError """ pass @_abstractmethod def plugin_unregister(self, flags=0): """ Called when the client wants to finish up or the socket goes eof. Plug-in should clean up all resources. Note: In the case where the socket goes EOF and the plugin_unregister runs into errors the exception(s) will not be delivered to the client! 
Returns None on success, else LsmError exception """ pass @_abstractmethod def job_status(self, job_id, flags=0): """ Returns the stats of the given job. Returns a tuple ( status (enumeration), percent_complete, completed item). else LsmError exception. """ pass @_abstractmethod def job_free(self, job_id, flags=0): """ Frees resources for a given job. Returns None on success, else raises an LsmError """ pass @_abstractmethod def capabilities(self, system, flags=0): """ Returns the capabilities for the selected system, raises LsmError """ pass @_abstractmethod def plugin_info(self, flags=0): """ Returns the description and version for plug-in, raises LsmError Note: Make sure plugin can handle this call before plugin_register is called. """ pass @_abstractmethod def pools(self, search_key=None, search_value=None, flags=0): """ Returns an array of pool objects. Pools are used in both block and file system interfaces, thus the reason they are in the base class. Raises LsmError on error """ pass @_abstractmethod def systems(self, flags=0): """ Returns an array of system objects. System information is used to distinguish resources from on storage array to another when the plug=in supports the ability to have more than one array managed by it Raises LsmError on error """ pass class IStorageAreaNetwork(IPlugin): def volumes(self, search_key=None, search_value=None, flags=0): """ Returns an array of volume objects Raises LsmError on error """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def volume_create(self, pool, volume_name, size_bytes, provisioning, flags=0): """ Creates a volume, given a pool, volume name, size and provisioning Returns a tuple (job_id, new volume) Note: Tuple return values are mutually exclusive, when one is None the other must be valid. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def volume_delete(self, volume, flags=0): """ Deletes a volume. Returns Job id or None if completed, else raises LsmError on errors. 
""" raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def volume_resize(self, volume, new_size_bytes, flags=0): """ Re-sizes a volume. Returns a tuple (job_id, re-sized_volume) Note: Tuple return values are mutually exclusive, when one is None the other must be valid. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def volume_replicate(self, pool, rep_type, volume_src, name, flags=0): """ Replicates a volume from the specified pool. In this library, to replicate means to create a new volume which is a copy of the source. Returns a tuple (job_id, replicated volume) Note: Tuple return values are mutually exclusive, when one is None the other must be valid. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def volume_replicate_range_block_size(self, system, flags=0): """ Returns the number of bytes per block for volume_replicate_range call. Callers of volume_replicate_range need to use this when calculating start and block lengths. Note: bytes per block may not match volume blocksize. Returns bytes per block, Raises LsmError on error """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def volume_replicate_range(self, rep_type, volume_src, volume_dest, ranges, flags=0): """ Replicates a portion of a volume to itself or another volume. The src, dest and number of blocks values change with vendor, call volume_replicate_range_block_size to get block unit size. Returns Job id or None if completed, else raises LsmError on errors. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def volume_enable(self, volume, flags=0): """ Makes a volume available to the host Returns None on success, else raises LsmError on errors. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def volume_disable(self, volume, flags=0): """ Makes a volume unavailable to the host Returns None on success, else raises LsmError on errors. 
""" raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def iscsi_chap_auth(self, init_id, in_user, in_password, out_user, out_password, flags): """ Register a user/password for the specified initiator for CHAP authentication. in_user & in_password are for inbound CHAP, out_user & out_password are for outbound CHAP. Note: Setting in_user, in_password or out_user, out_password to None will disable authentication. Raises LsmError on error """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def volume_mask(self, access_group, volume, flags=0): """ Allows an access group to access a volume. Returns None on success, else raises LsmError on errors. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def volume_unmask(self, access_group, volume, flags=0): """ Revokes access for an access group for a volume Returns None on success, else raises LsmError on errors. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def access_groups(self, search_key=None, search_value=None, flags=0): """ Returns a list of access groups, raises LsmError on errors. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def access_group_create(self, name, init_id, init_type, system, flags=0): """ Returns a list of access groups, raises LsmError on errors. 
""" raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def access_group_delete(self, access_group, flags=0): """ Deletes an access group, Raises LsmError on error """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def access_group_initiator_add(self, access_group, init_id, init_type, flags=0): """ Adds an initiator to an access group, Raises LsmError on error """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def access_group_initiator_delete(self, access_group, init_id, init_type, flags=0): """ Deletes an initiator from an access group, Raises LsmError on error """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def volumes_accessible_by_access_group(self, access_group, flags=0): """ Returns the list of volumes that access group has access to. Raises LsmError on error """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def access_groups_granted_to_volume(self, volume, flags=0): """ Returns the list of access groups that have access to the specified, Raises LsmError on error """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def volume_child_dependency(self, volume, flags=0): """ Returns True if this volume has other volumes which are dependant on it. Implies that this volume cannot be deleted or possibly modified because it would affect its children. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def volume_child_dependency_rm(self, volume, flags=0): """ If this volume has child dependency, this method call will fully replicate the blocks removing the relationship between them. This should return None (success) if volume_child_dependency would return False. Note: This operation could take a very long time depending on the size of the volume and the number of child dependencies. Returns None if complete else job id, raises LsmError on errors. 
""" raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def target_ports(self, search_key=None, search_value=None, flags=0): raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") class INetworkAttachedStorage(IPlugin): """ Class the represents Network attached storage (Common NFS/CIFS operations) """ def fs(self, search_key=None, search_value=None, flags=0): """ Returns a list of file systems on the controller. Raises LsmError on errors. """ pass def fs_delete(self, fs, flags=0): """ WARNING: Destructive Deletes a file system and everything it contains Returns None on success, else job id """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def fs_resize(self, fs, new_size_bytes, flags=0): """ Re-size a file system Returns a tuple (job_id, re-sized file system) Note: Tuple return values are mutually exclusive, when one is None the other must be valid. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def fs_create(self, pool, name, size_bytes, flags=0): """ Creates a file system given a pool, name and size. Note: size is limited to 2**64 bytes so max size of a single volume at this time is 16 Exabytes Returns a tuple (job_id, file system) Note: Tuple return values are mutually exclusive, when one is None the other must be valid. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def fs_clone(self, src_fs, dest_fs_name, snapshot=None, flags=0): """ Creates a thin, point in time read/writable copy of src to dest. Optionally uses snapshot as backing of src_fs Returns a tuple (job_id, file system) Note: Tuple return values are mutually exclusive, when one is None the other must be valid. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def fs_file_clone(self, fs, src_file_name, dest_file_name, snapshot=None, flags=0): """ Creates a thinly provisioned clone of src to dest. Note: Source and Destination are required to be on same filesystem Returns Job id or None if completed, else raises LsmError on errors. 
""" raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def fs_snapshots(self, fs, flags=0): """ Returns a list of snapshots for the supplied file system, Raises LsmError on error """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def fs_snapshot_create(self, fs, snapshot_name, flags=0): """ Snapshot is a point in time read-only copy Create a snapshot on the chosen file system Returns a tuple (job_id, snap shot created) Note: Tuple return values are mutually exclusive, when one is None the other must be valid. Note: Snapshot name may not match what was passed in (depends on array implementation) """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def fs_snapshot_delete(self, fs, snapshot, flags=0): """ Frees the re-sources for the given snapshot on the supplied filesystem. Returns Job id or None if completed, else raises LsmError on errors. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def fs_snapshot_restore(self, fs, snapshot, files, restore_files, all_files=False, flags=0): """ WARNING: Destructive! Reverts a file-system or just the specified files from the snapshot. If a list of files is supplied but the array cannot restore just them then the operation will fail with an LsmError raised. If files == None and all_files = True then all files on the file-system are restored. Restore_file if not None must be the same length as files with each index in each list referring to the associated file. Returns None on success, else job id, LsmError exception on error """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def fs_child_dependency(self, fs, files, flags=0): """ Returns True if the specified filesystem or specified file on this file system has child dependencies. This implies that this filesystem or specified file on this file system cannot be deleted or possibly modified because it would affect its children. 
""" raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def fs_child_dependency_rm(self, fs, files, flags=0): """ If this filesystem or specified file on this filesystem has child dependency this method will fully replicate the blocks removing the relationship between them. This should return None(success) if fs_child_dependency would return False. Note: This operation could take a very long time depending on the size of the filesystem and the number of child dependencies. Returns Job id or None if completed, else raises LsmError on errors. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") class INfs(INetworkAttachedStorage): def export_auth(self, flags=0): """ Returns the types of authentication that are available for NFS """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def exports(self, search_key=None, search_value=None, flags=0): """ Get a list of all exported file systems on the controller. """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def export_fs(self, fs_id, export_path, root_list, rw_list, ro_list, anon_uid, anon_gid, auth_type, options, flags=0): """ Exports a filesystem as specified in the export """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") def export_remove(self, export, flags=0): """ Removes the specified export """ raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") libstoragemgmt-1.2.3/python_binding/lsm/_client.py0000664000175000017500000016462512537737032017304 00000000000000# Copyright (C) 2011-2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: tasleson import os from lsm import (Volume, NfsExport, Capabilities, Pool, System, Disk, AccessGroup, FileSystem, FsSnapshot, uri_parse, LsmError, ErrorNumber, INetworkAttachedStorage, TargetPort) from _common import return_requires as _return_requires from _common import UDS_PATH as _UDS_PATH from _transport import TransPort as _TransPort from _data import IData as _IData ## Removes self for the hash d # @param d Hash to remove self from # @returns d with hash removed. def _del_self(d): """ Used to remove the self key from the dict d. Self is included when calling the function locals() in a class method. """ del d['self'] return d def _check_search_key(search_key, supported_keys): if search_key and search_key not in supported_keys: raise LsmError(ErrorNumber.UNSUPPORTED_SEARCH_KEY, "Unsupported search_key: '%s'" % search_key) return ## Descriptive exception about daemon not running. def _raise_no_daemon(): raise LsmError(ErrorNumber.DAEMON_NOT_RUNNING, "The libStorageMgmt daemon is not running (process " "name lsmd), try 'service libstoragemgmt start'") ## Main client class for library. # ** IMPORTANT ** # Theory of operation for methods in this class. # We are using the name of the method and the name of the parameters and # using python introspection abilities to translate them to the method and # parameter names. Makes the code compact, but you will break things if the # IPlugin class does not match the method names and parameters here! class Client(INetworkAttachedStorage): ## # Used for default flag value # FLAG_RSVD = 0 """ Client side class used for managing storage that utilises RPC mechanism. """ ## Method added so that the interface for the client RPC and the plug-in ## itself match. 
def plugin_register(self, uri, plain_text_password, timeout_ms, flags=0): raise RuntimeError("Do not call directly!") ## Called when we are ready to initialize the plug-in. # @param self The this pointer # @param uri The uniform resource identifier # @param plain_text_password Password as plain text # @param timeout_ms The timeout in ms # @param flags Reserved for future use, must be zero. # @returns None def __start(self, uri, password, timeout, flags=0): """ Instruct the plug-in to get ready """ self._tp.rpc('plugin_register', _del_self(locals())) ## Checks to see if any unix domain sockets exist in the base directory # and opens a socket to one to see if the server is actually there. # @param self The this pointer # @returns True if daemon appears to be present, else false. @staticmethod def _check_daemon_exists(): uds_path = Client._plugin_uds_path() if os.path.exists(uds_path): for root, sub_folders, files in os.walk(uds_path): for filename in files: uds = os.path.join(root, filename) try: #This operation will work if the daemon is available s = _TransPort.get_socket(uds) s.close() return True except LsmError: pass else: #Base directory is not present? pass return False @staticmethod def _plugin_uds_path(): rc = _UDS_PATH if 'LSM_UDS_PATH' in os.environ: rc = os.environ['LSM_UDS_PATH'] return rc ## Class constructor # @param self The this pointer # @param uri The uniform resource identifier # @param plain_text_password Password as plain text (Optional) # @param timeout_ms The timeout in ms # @param flags Reserved for future use, must be zero. 
# @returns None def __init__(self, uri, plain_text_password=None, timeout_ms=30000, flags=0): self._uri = uri self._password = plain_text_password self._timeout = timeout_ms self._uds_path = Client._plugin_uds_path() u = uri_parse(uri, ['scheme']) scheme = u['scheme'] if "+" in scheme: (plug, proto) = scheme.split("+") scheme = plug self.plugin_path = os.path.join(self._uds_path, scheme) if os.path.exists(self.plugin_path): self._tp = _TransPort(_TransPort.get_socket(self.plugin_path)) else: #At this point we don't know if the user specified an incorrect #plug-in in the URI or the daemon isn't started. We will check #the directory for other unix domain sockets. if Client._check_daemon_exists(): raise LsmError(ErrorNumber.PLUGIN_NOT_EXIST, "Plug-in %s not found!" % self.plugin_path) else: _raise_no_daemon() self.__start(uri, plain_text_password, timeout_ms, flags) ## Synonym for close. @_return_requires(None) def plugin_unregister(self, flags=FLAG_RSVD): """ Synonym for close. """ self.close(flags) ## Does an orderly plugin_unregister of the plug-in # @param self The this pointer # @param flags Reserved for future use, must be zero. 
    @_return_requires(None)
    def close(self, flags=FLAG_RSVD):
        """
        Does an orderly plugin_unregister of the plug-in
        """
        self._tp.rpc('plugin_unregister', _del_self(locals()))
        self._tp.close()
        self._tp = None

    ## Retrieves all the available plug-ins
    @staticmethod
    @_return_requires([unicode])
    def available_plugins(field_sep=':', flags=FLAG_RSVD):
        """
        Retrieves all the available plug-ins

        Return list of strings of available plug-ins with the
        "desc<field_sep>version"
        """
        rc = []
        if not Client._check_daemon_exists():
            _raise_no_daemon()

        uds_path = Client._plugin_uds_path()

        # Each socket in the directory is a running plug-in; ask each for
        # its description and version.
        for root, sub_folders, files in os.walk(uds_path):
            for filename in files:
                uds = os.path.join(root, filename)
                tp = _TransPort(_TransPort.get_socket(uds))
                i, v = tp.rpc('plugin_info', dict(flags=Client.FLAG_RSVD))
                rc.append("%s%s%s" % (i, field_sep, v))
                tp.close()
        return rc

    ## Sets the timeout for the plug-in
    # @param self The this pointer
    # @param ms Time-out in ms
    # @param flags Reserved for future use, must be zero.
    @_return_requires(None)
    def time_out_set(self, ms, flags=FLAG_RSVD):
        """
        Sets any time-outs for the plug-in (ms)

        Return None on success, else LsmError exception
        """
        return self._tp.rpc('time_out_set', _del_self(locals()))

    ## Retrieves the current time-out value.
    # @param self The this pointer
    # @param flags Reserved for future use, must be zero.
    # @returns Time-out value
    @_return_requires(int)
    def time_out_get(self, flags=FLAG_RSVD):
        """
        Retrieves the current time-out

        Return time-out in ms, else raise LsmError
        """
        return self._tp.rpc('time_out_get', _del_self(locals()))

    ## Retrieves the status of the specified job id.
    # @param self The this pointer
    # @param job_id The job identifier
    # @param flags Reserved for future use, must be zero.
    # @returns A tuple ( status (enumeration), percent_complete,
    # completed item)
    @_return_requires(int, int, _IData)
    def job_status(self, job_id, flags=FLAG_RSVD):
        """
        Returns the stats of the given job.

        Returns a tuple ( status (enumeration), percent_complete,
        completed item), else LsmError exception.
        """
        return self._tp.rpc('job_status', _del_self(locals()))

    ## Frees the resources for the specified job id.
    # @param self The this pointer
    # @param job_id Job id in which to release resource for
    # @param flags Reserved for future use, must be zero.
    @_return_requires(None)
    def job_free(self, job_id, flags=FLAG_RSVD):
        """
        Frees resources for a given job number.

        Returns None on success, else raises an LsmError
        """
        return self._tp.rpc('job_free', _del_self(locals()))

    ## Gets the capabilities of the array.
    # @param self The this pointer
    # @param system The system of interest
    # @param flags Reserved for future use, must be zero.
    # @returns Capability object
    @_return_requires(Capabilities)
    def capabilities(self, system, flags=FLAG_RSVD):
        """
        Fetches the capabilities of the array

        Returns a capability object, see data.py for details.
        """
        return self._tp.rpc('capabilities', _del_self(locals()))

    ## Gets information about the plug-in
    # @param self The this pointer
    # @param flags Reserved for future use
    # @returns Tuple (description, version)
    @_return_requires(unicode, unicode)
    def plugin_info(self, flags=FLAG_RSVD):
        """
        Returns a description and version of plug-in
        """
        return self._tp.rpc('plugin_info', _del_self(locals()))

    ## Returns an array of pool objects.
    # @param self The this pointer
    # @param search_key Search key
    # @param search_value Search value
    # @param flags Reserved for future use, must be zero.
    # @returns An array of pool objects.
    @_return_requires([Pool])
    def pools(self, search_key=None, search_value=None, flags=FLAG_RSVD):
        """
        Returns an array of pool objects.  Pools are used in both block and
        file system interfaces, thus the reason they are in the base class.
        """
        _check_search_key(search_key, Pool.SUPPORTED_SEARCH_KEYS)
        return self._tp.rpc('pools', _del_self(locals()))

    ## Returns an array of system objects.
    # @param self The this pointer
    # @param flags Reserved for future use, must be zero.
    # @returns An array of system objects.
    @_return_requires([System])
    def systems(self, flags=FLAG_RSVD):
        """
        Returns an array of system objects.  System information is used to
        distinguish resources from on storage array to another when the
        plug-in supports the ability to have more than one array managed by it
        """
        return self._tp.rpc('systems', _del_self(locals()))

    ## Register a user/password for the specified initiator for CHAP
    # authentication.
    # Note: If you pass an empty user and password the expected behavior is to
    # remove any authentication for the specified initiator.
    # @param self The this pointer
    # @param init_id The initiator ID
    # @param in_user User for inbound CHAP
    # @param in_password Password for inbound CHAP
    # @param out_user Outbound username
    # @param out_password Outbound password
    # @param flags Reserved for future use, must be zero.
    # @returns None on success, throws LsmError on errors.
    @_return_requires(None)
    def iscsi_chap_auth(self, init_id, in_user, in_password,
                        out_user, out_password, flags=FLAG_RSVD):
        """
        Register a user/password for the specified initiator for CHAP
        authentication.
        """
        # Raises on anything that is not a well-formed iSCSI IQN.
        AccessGroup.initiator_id_verify(init_id,
                                        AccessGroup.INIT_TYPE_ISCSI_IQN,
                                        raise_exception=True)
        return self._tp.rpc('iscsi_chap_auth', _del_self(locals()))

    ## Returns an array of volume objects
    # @param self The this pointer
    # @param search_key Search key to use
    # @param search_value Search value
    # @param flags Reserved for future use, must be zero.
    # @returns An array of volume objects.
    @_return_requires([Volume])
    def volumes(self, search_key=None, search_value=None, flags=FLAG_RSVD):
        """
        Returns an array of volume objects
        """
        _check_search_key(search_key, Volume.SUPPORTED_SEARCH_KEYS)
        return self._tp.rpc('volumes', _del_self(locals()))

    ## Creates a volume
    # @param self The this pointer
    # @param pool The pool object to allocate storage from
    # @param volume_name The human text name for the volume
    # @param size_bytes Size of the volume in bytes
    # @param provisioning How the volume is to be provisioned
    # @param flags Reserved for future use, must be zero.
    # @returns A tuple (job_id, new volume), when one is None the other is
    # valid.
    @_return_requires(unicode, Volume)
    def volume_create(self, pool, volume_name, size_bytes, provisioning,
                      flags=FLAG_RSVD):
        """
        Creates a volume, given a pool, volume name, size and provisioning

        returns a tuple (job_id, new volume)
        Note: Tuple return values are mutually exclusive, when one is None
        the other must be valid.
        """
        return self._tp.rpc('volume_create', _del_self(locals()))

    ## Re-sizes a volume
    # @param self The this pointer
    # @param volume The volume object to re-size
    # @param new_size_bytes Size of the volume in bytes
    # @param flags Reserved for future use, must be zero.
    # @returns A tuple (job_id, new re-sized volume), when one is
    # None the other is valid.
    @_return_requires(unicode, Volume)
    def volume_resize(self, volume, new_size_bytes, flags=FLAG_RSVD):
        """
        Re-sizes a volume.

        Returns a tuple (job_id, re-sized_volume)
        Note: Tuple return values are mutually exclusive, when one is None
        the other must be valid.
        """
        return self._tp.rpc('volume_resize', _del_self(locals()))

    ## Replicates a volume from the specified pool.
    # @param self The this pointer
    # @param pool The pool to re-size from
    # @param rep_type Replication type
    # (enumeration, see common.data.Volume)
    # @param volume_src The volume to replicate
    # @param name Human readable name of replicated volume
    # @param flags Reserved for future use, must be zero.
    # @returns A tuple (job_id, new replicated volume), when one is
    # None the other is valid.
    @_return_requires(unicode, Volume)
    def volume_replicate(self, pool, rep_type, volume_src, name,
                         flags=FLAG_RSVD):
        """
        Replicates a volume from the specified pool.

        Returns a tuple (job_id, replicated volume)
        Note: Tuple return values are mutually exclusive, when one is None
        the other must be valid.
        """
        return self._tp.rpc('volume_replicate', _del_self(locals()))

    ## Size of a replicated block.
    # @param self The this pointer
    # @param system The system to request the rep. block range size from
    # @param flags Reserved for future use, must be zero
    # @returns Size of the replicated block in bytes
    @_return_requires(int)
    def volume_replicate_range_block_size(self, system, flags=FLAG_RSVD):
        """
        Returns the size of a replicated block in bytes.
        """
        return self._tp.rpc('volume_replicate_range_block_size',
                            _del_self(locals()))

    ## Replicates a portion of a volume to itself or another volume.
    # @param self The this pointer
    # @param rep_type Replication type
    # (enumeration, see common.data.Volume)
    # @param volume_src The volume src to replicate from
    # @param volume_dest The volume dest to replicate to
    # @param ranges An array of Block range objects
    # @see lsm.common.data.BlockRange
    # @param flags Reserved for future use, must be zero.
    # @returns Job id or None when completed, else raises LsmError on errors.
    @_return_requires(unicode)
    def volume_replicate_range(self, rep_type, volume_src, volume_dest,
                               ranges, flags=FLAG_RSVD):
        """
        Replicates a portion of a volume to itself or another volume.  The src,
        dest and number of blocks values change with vendor, call
        volume_replicate_range_block_size to get block unit size.

        Returns Job id or None when completed, else raises LsmError on errors.
        """
        return self._tp.rpc('volume_replicate_range', _del_self(locals()))

    ## Deletes a volume
    # @param self The this pointer
    # @param volume The volume object which represents the volume to delete
    # @param flags Reserved for future use, must be zero.
    # @returns None on success, else job id.  Raises LsmError on errors.
    @_return_requires(unicode)
    def volume_delete(self, volume, flags=FLAG_RSVD):
        """
        Deletes a volume.

        Returns None on success, else job id
        """
        return self._tp.rpc('volume_delete', _del_self(locals()))

    ## Makes a volume online and available to the host.
    # @param self The this pointer
    # @param volume The volume to place online
    # @param flags Reserved for future use, must be zero.
    # @returns None on success, else raises LsmError
    @_return_requires(None)
    def volume_enable(self, volume, flags=FLAG_RSVD):
        """
        Makes a volume available to the host

        returns None on success, else raises LsmError on errors.
        """
        return self._tp.rpc('volume_enable', _del_self(locals()))

    ## Takes a volume offline
    # @param self The this pointer
    # @param volume The volume object
    # @param flags Reserved for future use, must be zero.
    # @returns None on success, else raises LsmError on errors.
    @_return_requires(None)
    def volume_disable(self, volume, flags=FLAG_RSVD):
        """
        Makes a volume unavailable to the host

        returns None on success, else raises LsmError on errors.
        """
        return self._tp.rpc('volume_disable', _del_self(locals()))

    ## Returns an array of disk objects
    # @param self The this pointer
    # @param search_key Search Key
    # @param search_value Search value
    # @param flags When equal to DISK.FLAG_RETRIEVE_FULL_INFO
    # returned objects will contain optional data.
    # If not defined, only the mandatory properties will
    # be returned.
    # @returns An array of disk objects.
    @_return_requires([Disk])
    def disks(self, search_key=None, search_value=None, flags=FLAG_RSVD):
        """
        Returns an array of disk objects
        """
        _check_search_key(search_key, Disk.SUPPORTED_SEARCH_KEYS)
        return self._tp.rpc('disks', _del_self(locals()))

    ## Access control for allowing an access group to access a volume
    # @param self The this pointer
    # @param access_group The access group
    # @param volume The volume to grant access to
    # @param flags Reserved for future use, must be zero.
    # @returns None on success, throws LsmError on errors.
    @_return_requires(None)
    def volume_mask(self, access_group, volume, flags=FLAG_RSVD):
        """
        Allows an access group to access a volume.
        """
        return self._tp.rpc('volume_mask', _del_self(locals()))

    ## Revokes access to a volume to initiators in an access group
    # @param self The this pointer
    # @param access_group The access group
    # @param volume The volume to grant access to
    # @param flags Reserved for future use, must be zero.
    # @returns None on success, throws LsmError on errors.
    @_return_requires(None)
    def volume_unmask(self, access_group, volume, flags=FLAG_RSVD):
        """
        Revokes access for an access group for a volume
        """
        return self._tp.rpc('volume_unmask', _del_self(locals()))

    ## Returns a list of access group objects
    # @param self The this pointer
    # @param search_key Search Key
    # @param search_value Search value
    # @param flags Reserved for future use, must be zero.
    # @returns List of access groups
    @_return_requires([AccessGroup])
    def access_groups(self, search_key=None, search_value=None,
                      flags=FLAG_RSVD):
        """
        Returns a list of access groups
        """
        _check_search_key(search_key, AccessGroup.SUPPORTED_SEARCH_KEYS)
        return self._tp.rpc('access_groups', _del_self(locals()))

    ## Creates an access group with the specified initiator in it.
# @param self The this pointer # @param name The initiator group name # @param init_id Initiator id # @param init_type Type of initiator (Enumeration) # @param system Which system to create this group on # @param flags Reserved for future use, must be zero. # @returns AccessGroup on success, else raises LsmError @_return_requires(AccessGroup) def access_group_create(self, name, init_id, init_type, system, flags=FLAG_RSVD): """ Creates an access group and add the specified initiator id, init_type and desired access. """ init_type, init_id = AccessGroup.initiator_id_verify( init_id, init_type, raise_exception=True)[1:] return self._tp.rpc('access_group_create', _del_self(locals())) ## Deletes an access group. # @param self The this pointer # @param access_group The access group to delete # @param flags Reserved for future use, must be zero. # @returns None on success, throws LsmError on errors. @_return_requires(None) def access_group_delete(self, access_group, flags=FLAG_RSVD): """ Deletes an access group """ return self._tp.rpc('access_group_delete', _del_self(locals())) ## Adds an initiator to an access group # @param self The this pointer # @param access_group Group to add initiator to # @param init_id Initiators id # @param init_type Initiator id type (enumeration) # @param flags Reserved for future use, must be zero. # @returns None on success, throws LsmError on errors. 
    @_return_requires(AccessGroup)
    def access_group_initiator_add(self, access_group, init_id, init_type,
                                   flags=FLAG_RSVD):
        """
        Adds an initiator to an access group
        """
        # Normalized values replace the caller's locals so the RPC payload
        # carries the validated initiator id/type.
        init_type, init_id = AccessGroup.initiator_id_verify(
            init_id, init_type, raise_exception=True)[1:]
        return self._tp.rpc('access_group_initiator_add', _del_self(locals()))

    ## Deletes an initiator from an access group
    # @param self The this pointer
    # @param access_group The access group to remove initiator from
    # @param init_id The initiator to remove from the group
    # @param init_type Initiator id type (enumeration)
    # @param flags Reserved for future use, must be zero.
    # @returns None on success, throws LsmError on errors.
    @_return_requires(AccessGroup)
    def access_group_initiator_delete(self, access_group, init_id, init_type,
                                      flags=FLAG_RSVD):
        """
        Deletes an initiator from an access group
        """
        init_id = AccessGroup.initiator_id_verify(init_id, None,
                                                  raise_exception=True)[2]
        return self._tp.rpc('access_group_initiator_delete',
                            _del_self(locals()))

    ## Returns the list of volumes that access group has access to.
    # @param self The this pointer
    # @param access_group The access group to list volumes for
    # @param flags Reserved for future use, must be zero.
    # @returns list of volumes
    @_return_requires([Volume])
    def volumes_accessible_by_access_group(self, access_group,
                                           flags=FLAG_RSVD):
        """
        Returns the list of volumes that access group has access to.
        """
        return self._tp.rpc('volumes_accessible_by_access_group',
                            _del_self(locals()))

    ## Returns the list of access groups that have access to the specified
    # volume.
    # @param self The this pointer
    # @param volume The volume to list access groups for
    # @param flags Reserved for future use, must be zero.
    # @returns list of access groups
    @_return_requires([AccessGroup])
    def access_groups_granted_to_volume(self, volume, flags=FLAG_RSVD):
        """
        Returns the list of access groups that have access to the specified
        volume.
        """
        return self._tp.rpc('access_groups_granted_to_volume',
                            _del_self(locals()))

    ## Checks to see if a volume has child dependencies.
    # @param self The this pointer
    # @param volume The volume to check
    # @param flags Reserved for future use, must be zero.
    # @returns True or False
    @_return_requires(bool)
    def volume_child_dependency(self, volume, flags=FLAG_RSVD):
        """
        Returns True if this volume has other volumes which are dependent on
        it.  Implies that this volume cannot be deleted or possibly modified
        because it would affect its children.
        """
        return self._tp.rpc('volume_child_dependency', _del_self(locals()))

    ## Removes any child dependency.
    # @param self The this pointer
    # @param volume The volume to remove dependencies for
    # @param flags Reserved for future use, must be zero.
    # @returns None if complete, else job id.
    @_return_requires(unicode)
    def volume_child_dependency_rm(self, volume, flags=FLAG_RSVD):
        """
        If this volume has child dependency, this method call will fully
        replicate the blocks removing the relationship between them.  This
        should return None (success) if volume_child_dependency would return
        False.

        Note: This operation could take a very long time depending on the size
        of the volume and the number of child dependencies.

        Returns None if complete else job id, raises LsmError on errors.
        """
        return self._tp.rpc('volume_child_dependency_rm', _del_self(locals()))

    ## Returns a list of file system objects.
    # @param self The this pointer
    # @param search_key Search Key
    # @param search_value Search value
    # @param flags Reserved for future use, must be zero.
    # @returns A list of FS objects.
    @_return_requires([FileSystem])
    def fs(self, search_key=None, search_value=None, flags=FLAG_RSVD):
        """
        Returns a list of file systems on the controller.
        """
        _check_search_key(search_key, FileSystem.SUPPORTED_SEARCH_KEYS)
        return self._tp.rpc('fs', _del_self(locals()))

    ## Deletes a file system
    # @param self The this pointer
    # @param fs The file system to delete
    # @param flags Reserved for future use, must be zero.
    # @returns None on success, else job id
    @_return_requires(unicode)
    def fs_delete(self, fs, flags=FLAG_RSVD):
        """
        WARNING: Destructive

        Deletes a file system and everything it contains
        Returns None on success, else job id
        """
        return self._tp.rpc('fs_delete', _del_self(locals()))

    ## Re-sizes a file system
    # @param self The this pointer
    # @param fs The file system to re-size
    # @param new_size_bytes The new size of the file system in bytes
    # @param flags Reserved for future use, must be zero.
    # @returns tuple (job_id, re-sized file system),
    # When one is None the other is valid
    @_return_requires(unicode, FileSystem)
    def fs_resize(self, fs, new_size_bytes, flags=FLAG_RSVD):
        """
        Re-size a file system

        Returns a tuple (job_id, re-sized file system)
        Note: Tuple return values are mutually exclusive, when one is None
        the other must be valid.
        """
        return self._tp.rpc('fs_resize', _del_self(locals()))

    ## Creates a file system.
    # @param self The this pointer
    # @param pool The pool object to allocate space from
    # @param name The human text name for the file system
    # @param size_bytes The size of the file system in bytes
    # @param flags Reserved for future use, must be zero.
    # @returns tuple (job_id, file system),
    # When one is None the other is valid
    @_return_requires(unicode, FileSystem)
    def fs_create(self, pool, name, size_bytes, flags=FLAG_RSVD):
        """
        Creates a file system given a pool, name and size.
        Note: size is limited to 2**64 bytes

        Returns a tuple (job_id, file system)
        Note: Tuple return values are mutually exclusive, when one is None
        the other must be valid.
        """
        return self._tp.rpc('fs_create', _del_self(locals()))

    ## Clones a file system
    # @param self The this pointer
    # @param src_fs The source file system to clone
    # @param dest_fs_name The destination file system clone name
    # @param snapshot Optional, create clone from previous snapshot
    # @param flags Reserved for future use, must be zero.
    # @returns tuple (job_id, file system)
    @_return_requires(unicode, FileSystem)
    def fs_clone(self, src_fs, dest_fs_name, snapshot=None, flags=FLAG_RSVD):
        """
        Creates a thin, point in time read/writable copy of src to dest.
        Optionally uses snapshot as backing of src_fs

        Returns a tuple (job_id, file system)
        Note: Tuple return values are mutually exclusive, when one is None
        the other must be valid.
        """
        return self._tp.rpc('fs_clone', _del_self(locals()))

    ## Clones an individual file or files on the specified file system
    # @param self The this pointer
    # @param fs The file system the files are on
    # @param src_file_name The source file name
    # @param dest_file_name The dest. file name
    # @param snapshot Optional, the snapshot to base clone source
    # file from
    # @param flags Reserved for future use, must be zero.
    # @returns None on success, else job id
    @_return_requires(unicode)
    def fs_file_clone(self, fs, src_file_name, dest_file_name, snapshot=None,
                      flags=FLAG_RSVD):
        """
        Creates a thinly provisioned clone of src to dest.
        Note: Source and Destination are required to be on same filesystem and
        all directories in destination path need to exist.

        Returns None on success, else job id
        """
        return self._tp.rpc('fs_file_clone', _del_self(locals()))

    ## Returns a list of snapshots
    # @param self The this pointer
    # @param fs The file system
    # @param flags Reserved for future use, must be zero.
    # @returns a list of snapshot objects.
    @_return_requires([FsSnapshot])
    def fs_snapshots(self, fs, flags=FLAG_RSVD):
        """
        Returns a list of snapshot names for the supplied file system
        """
        return self._tp.rpc('fs_snapshots', _del_self(locals()))

    ## Creates a snapshot (Point in time read only copy)
    # @param self The this pointer
    # @param fs The file system to snapshot
    # @param snapshot_name The human readable snapshot name
    # @param flags Reserved for future use, must be zero.
    # @returns tuple (job_id, snapshot)
    @_return_requires(unicode, FsSnapshot)
    def fs_snapshot_create(self, fs, snapshot_name, flags=FLAG_RSVD):
        """
        Snapshot is a point in time read-only copy

        Create a snapshot on the chosen file system.

        Returns a tuple (job_id, snapshot)
        Notes:
        - Snapshot name may not match what was passed in
          (depends on array implementation)
        - Tuple return values are mutually exclusive, when one is None
          the other must be valid.
        """
        return self._tp.rpc('fs_snapshot_create', _del_self(locals()))

    ## Deletes a snapshot
    # @param self The this pointer
    # @param fs The filesystem the snapshot it for
    # @param snapshot The specific snap shot to delete
    # @param flags Reserved for future use, must be zero.
    # @returns None on success, else job id
    @_return_requires(unicode)
    def fs_snapshot_delete(self, fs, snapshot, flags=FLAG_RSVD):
        """
        Frees the re-sources for the given snapshot on the supplied filesystem.

        Returns None on success else job id, LsmError exception on error
        """
        return self._tp.rpc('fs_snapshot_delete', _del_self(locals()))

    ## Reverts a snapshot
    # @param self The this pointer
    # @param fs The file system object to restore snapshot for
    # @param snapshot The snapshot file to restore back too
    # @param files The specific files to restore
    # @param restore_files Individual files to restore
    # @param all_files Set to True if all files should be restored
    # back
    # @param flags Reserved for future use, must be zero.
    # @return None on success, else job id
    @_return_requires(unicode)
    def fs_snapshot_restore(self, fs, snapshot, files, restore_files,
                            all_files=False, flags=FLAG_RSVD):
        """
        WARNING: Destructive!

        Reverts a file-system or just the specified files from the snapshot.
        If a list of files is supplied but the array cannot restore just them
        then the operation will fail with an LsmError raised.  If files ==
        None and all_files = True then all files on the file-system are
        restored.

        Restore_file if None none must be the same length as files with each
        index in each list referring to the associated file.

        Returns None on success, else job id, LsmError exception on error
        """
        return self._tp.rpc('fs_snapshot_restore', _del_self(locals()))

    ## Checks to see if a file system has child dependencies.
    # @param fs The file system to check
    # @param files The files to check (optional)
    # @param flags Reserved for future use, must be zero.
    # @returns True or False
    @_return_requires(bool)
    def fs_child_dependency(self, fs, files, flags=FLAG_RSVD):
        """
        Returns True if the specified filesystem or specified file on this
        file system has child dependencies.  This implies that this filesystem
        or specified file on this file system cannot be deleted or possibly
        modified because it would affect its children.
        """
        return self._tp.rpc('fs_child_dependency', _del_self(locals()))

    ## Removes child dependencies from a FS or specific file.
    # @param self The this pointer
    # @param fs The file system to remove child dependencies for
    # @param files The list of files to remove child dependencies (opt.)
    # @param flags Reserved for future use, must be zero.
    # @returns None if complete, else job id.
    @_return_requires(unicode)
    def fs_child_dependency_rm(self, fs, files, flags=FLAG_RSVD):
        """
        If this filesystem or specified file on this filesystem has child
        dependency this method will fully replicate the blocks removing the
        relationship between them.  This should return None(success) if
        fs_child_dependency would return False.

        Note: This operation could take a very long time depending on the size
        of the filesystem and the number of child dependencies.

        Returns None if completed, else job id.  Raises LsmError on errors.
        """
        return self._tp.rpc('fs_child_dependency_rm', _del_self(locals()))

    ## Returns a list of all the NFS client authentication types.
    # @param self The this pointer
    # @param flags Reserved for future use, must be zero.
    # @returns An array of client authentication types.
    @_return_requires([unicode])
    def export_auth(self, flags=FLAG_RSVD):
        """
        What types of NFS client authentication are supported.
        """
        return self._tp.rpc('export_auth', _del_self(locals()))

    ## Returns a list of all the exported file systems
    # @param self The this pointer
    # @param search_key Search Key
    # @param search_value Search value
    # @param flags Reserved for future use, must be zero.
    # @returns An array of export objects
    @_return_requires([NfsExport])
    def exports(self, search_key=None, search_value=None, flags=FLAG_RSVD):
        """
        Get a list of all exported file systems on the controller.
        """
        _check_search_key(search_key, NfsExport.SUPPORTED_SEARCH_KEYS)
        return self._tp.rpc('exports', _del_self(locals()))

    ## Exports a FS as specified in the export.
    # @param self The this pointer
    # @param fs_id The FS ID to export
    # @param export_path The export path (Set to None for array to pick)
    # @param root_list List of hosts with root access
    # @param rw_list List of hosts with read/write access
    # @param ro_list List of hosts with read only access
    # @param anon_uid UID to map to anonymous
    # @param anon_gid GID to map to anonymous
    # @param auth_type NFS client authentication type
    # @param options Options to pass to plug-in
    # @param flags Reserved for future use, must be zero.
    # @returns NfsExport on success, else raises LsmError
    @_return_requires(NfsExport)
    def export_fs(self, fs_id, export_path, root_list, rw_list, ro_list,
                  anon_uid=NfsExport.ANON_UID_GID_NA,
                  anon_gid=NfsExport.ANON_UID_GID_NA,
                  auth_type=None, options=None, flags=FLAG_RSVD):
        """
        Exports a filesystem as specified in the arguments
        """
        return self._tp.rpc('export_fs', _del_self(locals()))

    ## Removes the specified export
    # @param self The this pointer
    # @param export The export to remove
    # @param flags Reserved for future use, must be zero.
    # @returns None on success, else raises LsmError
    @_return_requires(None)
    def export_remove(self, export, flags=FLAG_RSVD):
        """
        Removes the specified export
        """
        return self._tp.rpc('export_remove', _del_self(locals()))

    ## Returns a list of target ports
    # @param self The this pointer
    # @param search_key The key to search against
    # @param search_value The value to search for
    # @param flags Reserved for future use, must be zero
    # @returns List of target ports, else raises LsmError
    @_return_requires([TargetPort])
    def target_ports(self, search_key=None, search_value=None,
                     flags=FLAG_RSVD):
        """
        Returns a list of target ports
        """
        _check_search_key(search_key, TargetPort.SUPPORTED_SEARCH_KEYS)
        return self._tp.rpc('target_ports', _del_self(locals()))

    ## Returns the RAID information of certain volume
    # @param self The this pointer
    # @param raid_type The RAID type of this volume
    # @param strip_size The size of strip of disk or other storage
    # extent.
    # @param disk_count The count of disks of RAID group(s) where
    # this volume allocated from.
    # @param min_io_size The preferred I/O size of random I/O.
    # @param opt_io_size The preferred I/O size of sequential I/O.
    # @returns [raid_type, strip_size, disk_count, min_io_size, opt_io_size],
    # else raises LsmError
    @_return_requires([int, int, int, int, int])
    def volume_raid_info(self, volume, flags=FLAG_RSVD):
        """Query the RAID information of certain volume.

        New in version 1.2.

        Query the RAID type, strip size, extents count, minimum I/O size,
        optimal I/O size of given volume.

        This method requires this capability:
            lsm.Capabilities.VOLUME_RAID_INFO

        Args:
            volume (Volume object): Volume to query
            flags (int): Reserved for future use. Should be set as
                lsm.Client.FLAG_RSVD
        Returns:
            [raid_type, strip_size, disk_count, min_io_size, opt_io_size]

            raid_type (int): RAID Type of requested volume.
                Could be one of these values:
                    Volume.RAID_TYPE_RAID0
                        Stripe
                    Volume.RAID_TYPE_RAID1
                        Two disks Mirror
                    Volume.RAID_TYPE_RAID3
                        Byte-level striping with dedicated parity
                    Volume.RAID_TYPE_RAID4
                        Block-level striping with dedicated parity
                    Volume.RAID_TYPE_RAID5
                        Block-level striping with distributed parity
                    Volume.RAID_TYPE_RAID6
                        Block-level striping with two distributed parities,
                        aka, RAID-DP
                    Volume.RAID_TYPE_RAID10
                        Stripe of mirrors
                    Volume.RAID_TYPE_RAID15
                        Parity of mirrors
                    Volume.RAID_TYPE_RAID16
                        Dual parity of mirrors
                    Volume.RAID_TYPE_RAID50
                        Stripe of parities
                    Volume.RAID_TYPE_RAID60
                        Stripe of dual parities
                    Volume.RAID_TYPE_RAID51
                        Mirror of parities
                    Volume.RAID_TYPE_RAID61
                        Mirror of dual parities
                    Volume.RAID_TYPE_JBOD
                        Just bunch of disks, no parity, no striping.
                    Volume.RAID_TYPE_UNKNOWN
                        The plugin failed to detect the volume's RAID type.
                    Volume.RAID_TYPE_MIXED
                        This volume contains multiple RAID settings.
                    Volume.RAID_TYPE_OTHER
                        Vendor specific RAID type
            strip_size(int): The size of strip on each disk or other storage
                extent.
                For RAID1/JBOD, it should be set as sector size.
                If plugin failed to detect strip size, it should be set
                as Volume.STRIP_SIZE_UNKNOWN(0).
            disk_count(int): The count of disks used for assembling the RAID
                group(s) where this volume allocated from.
                For any RAID system using the slice of disk, this value
                indicate how many disk slices are used for the RAID.
                For example, on LVM RAID, the 'disk_count' here indicate the
                count of PVs used for certain volume.
                Another example, on EMC VMAX, the 'disk_count' here indicate
                how many hyper volumes are used for this volume.
                For any RAID system using remote LUN for data storing, each
                remote LUN should be count as a disk.
                If the plugin failed to detect disk_count, it should be set
                as Volume.DISK_COUNT_UNKNOWN(0).
            min_io_size(int): The minimum I/O size, device preferred I/O
                size for random I/O. Any I/O size not equal to a multiple
                of this value may get significant speed penalty.
                Normally it refers to strip size of each disk(extent).
                If plugin failed to detect min_io_size, it should try these
                values in the sequence of:
                logical sector size -> physical sector size
                -> Volume.MIN_IO_SIZE_UNKNOWN(0).
            opt_io_size(int): The optimal I/O size, device preferred I/O
                size for sequential I/O. Normally it refers to RAID group
                stripe size.
                If plugin failed to detect opt_io_size, it should be set
                to Volume.OPT_IO_SIZE_UNKNOWN(0).
        Raises:
            LsmError:
                ErrorNumber.NO_SUPPORT
                    No support.
        """
        return self._tp.rpc('volume_raid_info', _del_self(locals()))

    @_return_requires([int, int, [unicode]])
    def pool_member_info(self, pool, flags=FLAG_RSVD):
        """
        lsm.Client.pool_member_info(self, pool, flags=lsm.Client.FLAG_RSVD)

        Version:
            1.2
        Usage:
            Query the membership information of certain pool:
                RAID type, member type and member ids.
            Currently, LibStorageMgmt supports two types of pool:
                * Sub-pool -- Pool.MEMBER_TYPE_POOL
                    Pool space is allocated from parent pool.
                    Example:
                        * NetApp ONTAP volume

                * Disk RAID pool -- Pool.MEMBER_TYPE_DISK
                    Pool is a RAID group assembled by disks.
                    Example:
                        * LSI MegaRAID disk group
                        * EMC VNX pool
                        * NetApp ONTAP aggregate
        Parameters:
            pool (lsm.Pool object)
                Pool to query
            flags (int)
                Optional. Reserved for future use.
                Should be set as lsm.Client.FLAG_RSVD.
        Returns:
            [raid_type, member_type, member_ids]

            raid_type (int)
                RAID Type of requested pool.
                    Could be one of these values:
                        Volume.RAID_TYPE_RAID0
                            Stripe
                        Volume.RAID_TYPE_RAID1
                            Two disks Mirror
                        Volume.RAID_TYPE_RAID3
                            Byte-level striping with dedicated parity
                        Volume.RAID_TYPE_RAID4
                            Block-level striping with dedicated parity
                        Volume.RAID_TYPE_RAID5
                            Block-level striping with distributed parity
                        Volume.RAID_TYPE_RAID6
                            Block-level striping with two distributed
                            parities, aka, RAID-DP
                        Volume.RAID_TYPE_RAID10
                            Stripe of mirrors
                        Volume.RAID_TYPE_RAID15
                            Parity of mirrors
                        Volume.RAID_TYPE_RAID16
                            Dual parity of mirrors
                        Volume.RAID_TYPE_RAID50
                            Stripe of parities
                        Volume.RAID_TYPE_RAID60
                            Stripe of dual parities
                        Volume.RAID_TYPE_RAID51
                            Mirror of parities
                        Volume.RAID_TYPE_RAID61
                            Mirror of dual parities
                        Volume.RAID_TYPE_JBOD
                            Just bunch of disks, no parity, no striping.
                        Volume.RAID_TYPE_UNKNOWN
                            The plugin failed to detect the volume's
                            RAID type.
                        Volume.RAID_TYPE_MIXED
                            This pool contains multiple RAID settings.
                        Volume.RAID_TYPE_OTHER
                            Vendor specific RAID type
            member_type (int)
                Could be one of these values:
                    Pool.MEMBER_TYPE_POOL
                        Current pool(also known as sub-pool) is allocated
                        from other pool(parent pool).
                        The 'raid_type' will set to RAID_TYPE_OTHER unless
                        certain RAID system support RAID using space of
                        parent pools.
                    Pool.MEMBER_TYPE_DISK
                        Pool is created from RAID group using whole disks.
                    Pool.MEMBER_TYPE_OTHER
                        Vendor specific RAID member type.
                    Pool.MEMBER_TYPE_UNKNOWN
                        Plugin failed to detect the RAID member type.
            member_ids (list of strings)
                When 'member_type' is Pool.MEMBER_TYPE_POOL,
                the 'member_ids' will contain a list of parent Pool IDs.
                When 'member_type' is Pool.MEMBER_TYPE_DISK,
                the 'member_ids' will contain a list of disk IDs.
                When 'member_type' is Pool.MEMBER_TYPE_OTHER or
                Pool.MEMBER_TYPE_UNKNOWN, the member_ids should be an
                empty list.
        SpecialExceptions:
            LsmError
                ErrorNumber.NO_SUPPORT
                ErrorNumber.NOT_FOUND_POOL
        Capability:
            lsm.Capabilities.POOL_MEMBER_INFO
        """
        return self._tp.rpc('pool_member_info', _del_self(locals()))

    @_return_requires([[int], [int]])
    def volume_raid_create_cap_get(self, system, flags=FLAG_RSVD):
        """
        lsm.Client.volume_raid_create_cap_get(
            self, system, flags=lsm.Client.FLAG_RSVD)

        Version:
            1.2
        Usage:
            This method is dedicated to local hardware RAID cards.
            Query out all supported RAID types and strip sizes which could
            be used by lsm.Client.volume_raid_create() method.
        Parameters:
            system (lsm.System)
                Instance of lsm.System
            flags (int)
                Optional. Reserved for future use.
                Should be set as lsm.Client.FLAG_RSVD.
        Returns:
            [raid_types, strip_sizes]
                raid_types ([int])
                    List of integer, possible values are:
                        Volume.RAID_TYPE_RAID0
                        Volume.RAID_TYPE_RAID1
                        Volume.RAID_TYPE_RAID5
                        Volume.RAID_TYPE_RAID6
                        Volume.RAID_TYPE_RAID10
                        Volume.RAID_TYPE_RAID50
                        Volume.RAID_TYPE_RAID60
                strip_sizes ([int])
                    List of integer. Stripe size in bytes.
        SpecialExceptions:
            LsmError
                lsm.ErrorNumber.NO_SUPPORT
                    Method not supported.
        Sample:
            lsm_client = lsm.Client('sim://')
            lsm_sys = lsm_client.systems()[0]
            disks = lsm_client.disks(
                search_key='system_id',
                search_value=lsm_sys.id)

            free_disks = [d for d in disks if d.status == Disk.STATUS_FREE]
            supported_raid_types, supported_strip_sizes = \
                lsm_client.volume_raid_create_cap_get(lsm_sys)
            new_vol = lsm_client.volume_raid_create(
                'test_volume_raid_create', supported_raid_types[0],
                free_disks, supported_strip_sizes[0])

        Capability:
            lsm.Capabilities.VOLUME_CREATE_RAID
                This method is mandatory when volume_raid_create() is
                supported.
""" return self._tp.rpc('volume_raid_create_cap_get', _del_self(locals())) @_return_requires(Volume) def volume_raid_create(self, name, raid_type, disks, strip_size, flags=FLAG_RSVD): """ lsm.Client.volume_raid_create(self, name, raid_type, disks, strip_size, flags=lsm.Client.FLAG_RSVD) Version: 1.2 Usage: This method is dedicated to local hardware RAID cards. Create a disk RAID pool and allocate entire storage space to new volume using requested volume name. When dealing with RAID10, 50 or 60, the first half part of 'disks' will be located in one bottom layer RAID group. The new volume and new pool will created within the same system of provided disks. This method does not allow duplicate call, when duplicate call was issued, LsmError with ErrorNumber.DISK_NOT_FREE will be raise. User should check disk.status for Disk.STATUS_FREE before invoking this method. Parameters: name (string) The name for new volume. The requested volume name might be ignored due to restriction of hardware RAID vendors. The pool name will be automatically choosed by plugin. raid_type (int) The RAID type for the RAID group, possible values are: Volume.RAID_TYPE_RAID0 Volume.RAID_TYPE_RAID1 Volume.RAID_TYPE_RAID5 Volume.RAID_TYPE_RAID6 Volume.RAID_TYPE_RAID10 Volume.RAID_TYPE_RAID15 Volume.RAID_TYPE_RAID16 Volume.RAID_TYPE_RAID50 Please check volume_raid_create_cap_get() returns to get supported all raid types of current hardware RAID card. disks ([lsm.Disks,]) A list of lsm.Disk objects. Free disks used for new RAID group. strip_size (int) The size in bytes of strip. When setting strip_size to Volume.VCR_STRIP_SIZE_DEFAULT, it allow hardware RAID cards to choose their default value. Please use volume_raid_create_cap_get() method to get all supported strip size of current hardware RAID card. The Volume.VCR_STRIP_SIZE_DEFAULT is always supported when lsm.Capabilities.VOLUME_CREATE_RAID is supported. flags (int) Optional. Reserved for future use. Should be set as lsm.Client.FLAG_RSVD. 
Returns: lsm.Volume The lsm.Volume object for newly created volume. SpecialExceptions: LsmError lsm.ErrorNumber.NO_SUPPORT Method not supported or RAID type not supported. lsm.ErrorNumber.DISK_NOT_FREE Disk is not in Disk.STATUS_FREE status. lsm.ErrorNumber.NOT_FOUND_DISK Disk not found lsm.ErrorNumber.INVALID_ARGUMENT 1. Invalid input argument data. 2. Disks are not from the same system. 3. Disks are not from the same enclosure. 4. Invalid strip_size. 5. Disk count are meet the minimum requirement: RAID1: len(disks) == 2 RAID5: len(disks) >= 3 RAID6: len(disks) >= 4 RAID10: len(disks) % 2 == 0 and len(disks) >= 4 RAID50: len(disks) % 2 == 0 and len(disks) >= 6 RAID60: len(disks) % 2 == 0 and len(disks) >= 8 lsm.ErrorNumber.NAME_CONFLICT Requested name is already be used by other volume. Sample: lsm_client = lsm.Client('sim://') disks = lsm_client.disks() free_disks = [d for d in disks if d.status == Disk.STATUS_FREE] new_vol = lsm_client.volume_raid_create( 'raid0_vol1', Volume.RAID_TYPE_RAID0, free_disks) Capability: lsm.Capabilities.VOLUME_CREATE_RAID Indicate current system support volume_raid_create() method. At least one RAID type should be supported. The strip_size == Volume.VCR_STRIP_SIZE_DEFAULT is supported. 
""" if len(disks) == 0: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Illegal input disks argument: no disk included") if raid_type == Volume.RAID_TYPE_RAID1 and len(disks) != 2: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Illegal input disks argument: RAID 1 only allow 2 disks") if raid_type == Volume.RAID_TYPE_RAID5 and len(disks) < 3: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Illegal input disks argument: RAID 5 require 3 or more disks") if raid_type == Volume.RAID_TYPE_RAID6 and len(disks) < 4: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Illegal input disks argument: RAID 6 require 4 or more disks") if raid_type == Volume.RAID_TYPE_RAID10: if len(disks) % 2 or len(disks) < 4: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Illegal input disks argument: " "RAID 10 require even disks count and 4 or more disks") if raid_type == Volume.RAID_TYPE_RAID50: if len(disks) % 2 or len(disks) < 6: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Illegal input disks argument: " "RAID 50 require even disks count and 6 or more disks") if raid_type == Volume.RAID_TYPE_RAID60: if len(disks) % 2 or len(disks) < 8: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Illegal input disks argument: " "RAID 60 require even disks count and 8 or more disks") return self._tp.rpc('volume_raid_create', _del_self(locals())) libstoragemgmt-1.2.3/python_binding/Makefile.am0000664000175000017500000000045712537546123016545 00000000000000 lsmdir = $(pythondir)/lsm externaldir = $(lsmdir)/external lsm_PYTHON = \ lsm/__init__.py \ lsm/_client.py \ lsm/_common.py \ lsm/_data.py \ lsm/_transport.py \ lsm/version.py \ lsm/_iplugin.py \ lsm/_pluginrunner.py external_PYTHON = \ lsm/external/__init__.py \ lsm/external/xmltodict.py libstoragemgmt-1.2.3/python_binding/Makefile.in0000664000175000017500000004415512542455445016563 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) 
-c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = python_binding DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(external_PYTHON) $(lsm_PYTHON) \ $(top_srcdir)/build-aux/py-compile ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" 
} { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__py_compile = PYTHON=$(PYTHON) $(SHELL) $(py_compile) am__installdirs = "$(DESTDIR)$(externaldir)" "$(DESTDIR)$(lsmdir)" am__pep3147_tweak = \ sed -e 's|\.py$$||' -e 's|[^/]*$$|__pycache__/&.*.py|' py_compile = $(top_srcdir)/build-aux/py-compile am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = 
@LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = 
@dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ lsmdir = $(pythondir)/lsm externaldir = $(lsmdir)/external lsm_PYTHON = \ lsm/__init__.py \ lsm/_client.py \ lsm/_common.py \ lsm/_data.py \ lsm/_transport.py \ lsm/version.py \ lsm/_iplugin.py \ lsm/_pluginrunner.py external_PYTHON = \ lsm/external/__init__.py \ lsm/external/xmltodict.py all: all-am .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu python_binding/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu python_binding/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-externalPYTHON: $(external_PYTHON) @$(NORMAL_INSTALL) @list='$(external_PYTHON)'; dlist=; list2=; test -n "$(externaldir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(externaldir)'"; \ $(MKDIR_P) "$(DESTDIR)$(externaldir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f $$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(externaldir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(externaldir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ $(am__py_compile) --destdir "$(DESTDIR)" \ --basedir "$(externaldir)" $$dlist; \ else :; fi uninstall-externalPYTHON: @$(NORMAL_UNINSTALL) @list='$(external_PYTHON)'; test -n "$(externaldir)" || list=; \ py_files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$py_files" || exit 0; \ dir='$(DESTDIR)$(externaldir)'; \ pyc_files=`echo "$$py_files" | sed 's|$$|c|'`; \ pyo_files=`echo "$$py_files" | sed 's|$$|o|'`; \ py_files_pep3147=`echo "$$py_files" | $(am__pep3147_tweak)`; \ echo 
"$$py_files_pep3147";\ pyc_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|c|'`; \ pyo_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|o|'`; \ st=0; \ for files in \ "$$py_files" \ "$$pyc_files" \ "$$pyo_files" \ "$$pyc_files_pep3147" \ "$$pyo_files_pep3147" \ ; do \ $(am__uninstall_files_from_dir) || st=$$?; \ done; \ exit $$st install-lsmPYTHON: $(lsm_PYTHON) @$(NORMAL_INSTALL) @list='$(lsm_PYTHON)'; dlist=; list2=; test -n "$(lsmdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(lsmdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(lsmdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f $$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(lsmdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(lsmdir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ $(am__py_compile) --destdir "$(DESTDIR)" \ --basedir "$(lsmdir)" $$dlist; \ else :; fi uninstall-lsmPYTHON: @$(NORMAL_UNINSTALL) @list='$(lsm_PYTHON)'; test -n "$(lsmdir)" || list=; \ py_files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$py_files" || exit 0; \ dir='$(DESTDIR)$(lsmdir)'; \ pyc_files=`echo "$$py_files" | sed 's|$$|c|'`; \ pyo_files=`echo "$$py_files" | sed 's|$$|o|'`; \ py_files_pep3147=`echo "$$py_files" | $(am__pep3147_tweak)`; \ echo "$$py_files_pep3147";\ pyc_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|c|'`; \ pyo_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|o|'`; \ st=0; \ for files in \ "$$py_files" \ "$$pyc_files" \ "$$pyo_files" \ "$$pyc_files_pep3147" \ "$$pyo_files_pep3147" \ ; do \ $(am__uninstall_files_from_dir) || st=$$?; \ done; \ exit $$st tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ 
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: for dir in "$(DESTDIR)$(externaldir)" "$(DESTDIR)$(lsmdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-externalPYTHON install-lsmPYTHON install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-externalPYTHON uninstall-lsmPYTHON .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am \ install-externalPYTHON install-html install-html-am \ install-info install-info-am install-lsmPYTHON install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am uninstall-externalPYTHON \ uninstall-lsmPYTHON # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: libstoragemgmt-1.2.3/Makefile.am0000664000175000017500000000132012542256740013517 00000000000000## Process this file with automake to produce Makefile.in ACLOCAL_AMFLAGS = -I m4 DISTCHECK_CONFIGURE_FLAGS = --with-systemdsystemunitdir=$$dc_install_base/$(systemdsystemunitdir) DISTCHECK_CONFIGURE_FLAGS += \ --with-bash-completion-dir=$$dc_install_base/$(bashcompletiondir) SUBDIRS= c_binding python_binding plugin doc tools daemon packaging config if BUILD_C_UNIT SUBDIRS += test endif EXTRA_DIST = \ libstoragemgmt.pc.in \ libstoragemgmt.pc \ autogen.sh pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = libstoragemgmt.pc #Source code documentation docs: doxygen doc/doxygen.conf rpm: clean @(unset CDPATH ; $(MAKE) dist && rpmbuild -ta $(distdir).tar.gz) MAINTAINERCLEANFILES = .git-module-status libstoragemgmt-1.2.3/doc/0000775000175000017500000000000012542455463012317 500000000000000libstoragemgmt-1.2.3/doc/doxygen.conf.in0000664000175000017500000020003512537546123015166 00000000000000# Doxyfile 1.6.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. 
DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = libStorageMgmt # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = @VERSION@ # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = ./doc/srcdoc/ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. 
OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. 
If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = NO # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = /Users/dimitri/doxygen/mail/1.5.7/doxywizard/ # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! 
or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. 
Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it parses. # With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this tag. # The format is ext=language, where ext is a file extension, and language is one of # the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, # Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat # .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C. Note that for custom extensions you also need to set # FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. 
Setting this option to YES (the default) # will make doxygen to replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. 
For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penalty. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will roughly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols SYMBOL_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. 
EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespace are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. 
This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = NO # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = YES # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. 
SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. 
MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by # doxygen. The layout file controls the global structure of the generated output files # in an output format independent way. The create the layout file that represents # doxygen's defaults, run doxygen with the -l option. You can optionally specify a # file name after the option, if omitted DoxygenLayout.xml will be used as the name # of the layout file. 
LAYOUT_FILE = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. 
If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = . tools/lsmcli # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 FILE_PATTERNS = *.c \ *.cc \ *.cxx \ *.cpp \ *.c++ \ *.d \ *.java \ *.ii \ *.ixx \ *.ipp \ *.i++ \ *.inl \ *.h \ *.hh \ *.hxx \ *.hpp \ *.h++ \ *.idl \ *.odl \ *.cs \ *.php \ *.php3 \ *.inc \ *.m \ *.mm \ *.dox \ *.py \ *.f90 \ *.f \ *.vhd \ *.vhdl # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. 
EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = */test/* # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = * # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. 
Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = YES # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. 
REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. 
# The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # If the HTML_TIMESTAMP tag is set to YES then the generated HTML # documentation will contain the timestamp. HTML_TIMESTAMP = NO # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! 
HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. 
DOCSET_BUNDLE_ID = org.doxygen.Project # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER # are set, an additional index file will be generated that can be used as input for # Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated # HTML documentation. 
GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. # For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's # filter section matches. # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. 
# If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. GENERATE_TREEVIEW = YES # By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, # and Class Hierarchy pages using a tree view instead of an ordered list. USE_INLINE_TREES = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # When the SEARCHENGINE tag is enable doxygen will generate a search box # for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP) or Qt help (GENERATE_QHP) # there is already a search function so this one should typically # be disabled. SEARCHENGINE = NO #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. 
LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. 
# This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. 
RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. 
XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. This is useful # if you want to understand what is going on. On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. 
PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). 
The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. 
TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = NO # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. 
This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = YES # By default doxygen will write a font called FreeSans.ttf to the output # directory and reference it in all dot files that doxygen generates. This # font does not include all possible unicode characters however, so when you need # these (or just want a differently looking font) you can specify the font name # using DOT_FONTNAME. You need need to make sure dot is able to find the font, # which can be done by putting it in a standard location or by setting the # DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory # containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot # can find it using this tag. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. 
COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = YES # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = YES # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. 
GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. 
MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES libstoragemgmt-1.2.3/doc/doxygen.conf0000664000175000017500000020003112542455451014554 00000000000000# Doxyfile 1.6.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. 
Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = libStorageMgmt # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = 1.2.3 # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = ./doc/srcdoc/ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. 
# The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. 
ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = NO # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = /Users/dimitri/doxygen/mail/1.5.7/doxywizard/ # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) 
JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. 
The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = YES # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it parses. # With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this tag. # The format is ext=language, where ext is a file extension, and language is one of # the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, # Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat # .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C. Note that for custom extensions you also need to set # FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. 
BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen to replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. 
This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache used to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penalty. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will roughly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols SYMBOL_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. 
EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespace are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. 
# If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = NO # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = YES # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. 
SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. 
GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). 
Doxygen will invoke the program by executing (via # popen()) the command 'command input-file', where 'command' is the value of # the FILE_VERSION_FILTER tag, and 'input-file' is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by # doxygen. The layout file controls the global structure of the generated output files # in an output format independent way. To create the layout file that represents # doxygen's defaults, run doxygen with the -l option. You can optionally specify a # file name after the option, if omitted DoxygenLayout.xml will be used as the name # of the layout file. LAYOUT_FILE = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. 
If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = . tools/lsmcli # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. 
If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 FILE_PATTERNS = *.c \ *.cc \ *.cxx \ *.cpp \ *.c++ \ *.d \ *.java \ *.ii \ *.ixx \ *.ipp \ *.i++ \ *.inl \ *.h \ *.hh \ *.hxx \ *.hpp \ *.h++ \ *.idl \ *.odl \ *.cs \ *.php \ *.php3 \ *.inc \ *.m \ *.mm \ *.dox \ *.py \ *.f90 \ *.f \ *.vhd \ *.vhdl # The RECURSIVE tag can be used to specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = */test/* # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). 
EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = * # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). 
FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = YES # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. 
USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. 
HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # If the HTML_TIMESTAMP tag is set to YES then the generated HTML # documentation will contain the timestamp. HTML_TIMESTAMP = NO # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox, # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. 
Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). 
GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER # are set, an additional index file will be generated that can be used as input for # Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated # HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. # For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's # filter section matches. 
# Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. GENERATE_TREEVIEW = YES # By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, # and Class Hierarchy pages using a tree view instead of an ordered list. USE_INLINE_TREES = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # When the SEARCHENGINE tag is enable doxygen will generate a search box # for the HTML output. 
The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP) or Qt help (GENERATE_QHP) # there is already a search function so this one should typically # be disabled. SEARCHENGINE = NO #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. 
Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. 
RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. 
MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. 
GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. This is useful # if you want to understand what is going on. On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. 
Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. 
Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). 
PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = NO # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = YES # By default doxygen will write a font called FreeSans.ttf to the output # directory and reference it in all dot files that doxygen generates. This # font does not include all possible unicode characters however, so when you need # these (or just want a differently looking font) you can specify the font name # using DOT_FONTNAME. 
You need need to make sure dot is able to find the font, # which can be done by putting it in a standard location or by setting the # DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory # containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot # can find it using this tag. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. 
INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = YES # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = YES # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. 
DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. 
DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES libstoragemgmt-1.2.3/doc/Makefile.am0000664000175000017500000000006412537546123014271 00000000000000EXTRA_DIST=doxygen.conf REST_API_Doc SUBDIRS = man libstoragemgmt-1.2.3/doc/Makefile.in0000664000175000017500000004634612542455445014321 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = doc DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(srcdir)/doxygen.conf.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 
$(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = doxygen.conf CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. 
This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ 
LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ 
bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ EXTRA_DIST = doxygen.conf REST_API_Doc SUBDIRS = man all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu doc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu doc/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): doxygen.conf: $(top_builddir)/config.status $(srcdir)/doxygen.conf.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. 
$(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z 
"$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all 
variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/doc/man/0000775000175000017500000000000012542455463013072 500000000000000libstoragemgmt-1.2.3/doc/man/megaraid_lsmplugin.10000664000175000017500000000421312542455451016734 00000000000000.TH megaraid_lsmplugin "1" "June 2015" "megaraid_lsmplugin 1.2.3" "libStorageMgmt" .SH NAME megaraid_lsmplugin -- LibstorageMgmt MegaRAID plugin .SH DESCRIPTION LibstorageMgmt megaraid plugin allows user to manage LSI MegaRAID via vendor tool \fBstorcli\fR[1] or Dell PERC via \fBperccli\fR[2]. The 'megaraid_lsmplugin' executable file is for libStorageMgmt daemon to execute when client user specifies megaraid plugin in the URI. Extra SELinux actions are required to allowing plugin access the hardware. When both storcli and percli is installed, the plugin will use storcli by default even that will result in no system found on Dell PERC server. In this case, please remove storcli. Please be noted, current volume ID is just for temperate use, we will change to use volume VPD83 as volume ID once vendor provide so. .SH URI To use this plugin, users should set their URI to this format: .nf \fBmegaraid://\fR or \fBmegaraid://?storcli=\fR .fi .TP \fBURI parameters\fR These URI parameters are supported by this plugin: .RS 7 .TP \fBstorcli\fR The 'storcli' URI parameter is used to specified the path of storcli/perccli tool. By default, this plugin will try these paths: storcli rpm: \fB/opt/MegaRAID/storcli/storcli64\fR, \fB/opt/MegaRAID/storcli/storcli\fR perccli rpm: \fB/opt/MegaRAID/perccli/perccli64\fR, \fB/opt/MegaRAID/perccli/perccli\fR .SH ROOT PRIVILEGE This plugin requires both \fBlsmd\fR daemon and API client running as root user. Please check manpage \fIlsmd.conf (5)\fR for detail. .SH SUPPORTED HARDWARES Please refer to LSI or Dell website for hardware support status of storcli/perccli. 
Detailed support status can be queried via: * \fBlsm.Client.capabilities()\fR (Python API) * \fBlsm_capabilities()\fR (C API) * \fBlsmcli capabilities\fR (lsmcli command line). .SH FIREWALL RULES This plugin only execute \fBstorcli\fR on localhost. No network connection required. .SH SEE ALSO \fIlsmcli\fR(1), \fIlsmd\fR(1), [1] http://www.lsi.com, [2] http://www.dell.com .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR libstoragemgmt-1.2.3/doc/man/smispy_lsmplugin.1.in0000664000175000017500000000544712537546123017127 00000000000000.TH smispy_lsmplugin "1" "June 2015" "smispy_lsmplugin @VERSION@" "libStorageMgmt" .SH NAME smispy_lsmplugin -- LibstorageMgmt SMI-S Plugin .SH DESCRIPTION LibstorageMgmt SMI-S Plugin allows LibstorageMgmt user to manage storage arrays like: EMC VNX/VMAX, HDS AMS and LSI MegaRAID etc. via SNIA SMI-S standard. The 'smispy_lsmplugin' executable file is for LibstorageMgmt daemon to execute when client user requests SMI-S plugin. .SH URI To use this plugin, users should set their URI to this format: .nf # HTTP connection \fBsmispy://@:?\fR # HTTPS connection \fBsmispy+ssl://@:?\fR .fi .TP \fBusername\fR The \fBusername\fR is the SMI-S provider user account. .TP \fBsmis_provder_host\fR The \fBsmis_provder_host\fR is the SMI-S provider's IP address or DNS name. .TP \fBport\fR By default, SMI-S plugin will use port \fB5988\fR for HTTP connection and port \fB5989\fR for HTTPS connection. You can skip this in URI if your SMI-S provider is using default ports. For non-default ports, SELinux will likely prevent the SMI-S plugin from making a network connection. .TP \fBURI parameters\fR These URI parameters are supported by this plugin: .RS 7 .TP \fBnamespace=\fR This URI parameter is for LSI MegaRAID and NetApp E-Series only. .TP \fBsystem=\fR For those SMI-S providers which support multi-systems(Example EMC ECIM), this URI parameter can be used to limit network communication for specified system only. 
The \fBsystem_id\fR is the ID of the system: * \fBlsm.System.id\fR (Python API) * \fBlsm_system_id_get()\fR (C API) * \fBlsmcli list --systems\fR (lsmcli command line) .TP \fBno_ssl_verify=yes\fR This URI parameter is for HTTPS connections only. With this URI parameter, the SMI-S plugin will not validate SMI-S provider's server SSL certificate. It's often used for self-signed CA environment, but it's strongly suggested to remove this URI parameter and install self-signed CA properly. .SH Supported Hardware The LibstorageMgmt SMI-S plugin is based on 'Block Services Package' profile , SNIA SMI-S 1.4 or later. Any storage system which implements that profile may be supported. Detailed support status can be queried via: * \fBlsm.Client.capabilities()\fR (Python API) * \fBlsm_capabilities()\fR (C API) * \fBlsmcli capabilities\fR (lsmcli command line). .SH FIREWALL RULES By default, this plugin requires access to the SMI-S provider's TCP 5988 port for HTTP conenction and TCP 5989 port for HTTPS connection. .SH SEE ALSO \fBlsmcli\fR(1), \fBlsmd\fR(1) .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR .br Tony Asleson \fI\fR libstoragemgmt-1.2.3/doc/man/ontap_lsmplugin.10000664000175000017500000000325612542455451016312 00000000000000.TH ontap_lsmplugin "1" "June 2015" "ontap_lsmplugin 1.2.3" "libStorageMgmt" .SH NAME ontap_lsmplugin -- LibstorageMgmt ONTAP Plugin .SH DESCRIPTION LibstorageMgmt ontap plugin allows user to manage NetApp ONTAP storage arrays. 
The 'ontap_lsmplugin' executable file is for libStorageMgmt daemon to execute when client specifies ontap plugin in the URI This plugin requires NetApp ONTAP storage array to enable these options: \fBoptions httpd.enable on\fR \fBoptions httpd.admin.enable on\fR This options is required for HTTPS connection: \fBoptions httpd.admin.ssl.enable on\fR .SH URI To use this plugin, users should set their URI to this format: .nf # HTTP connection \fBontap://@\fR # HTTPS connection \fBontap+ssl://@\fR .fi .TP \fBusername\fR The \fBusername\fR is the user account with administration privilege. .TP \fBontap_filer_ip\fR The \fBontap_filer_ip\fR is the NetApp ONTAP filer IP address or DNS name. .TP \fBURI parameters\fR No additional URI parameters are supported by this plugin. .SH Supported Hardware NetApp ONTAP 8.x is supported. Detailed support status can be queried via: * \fBlsm.Client.capabilities()\fR (Python API) * \fBlsm_capabilities()\fR (C API) * \fBlsmcli capabilities\fR (lsmcli command line). .SH FIREWALL RULES This plugin requires the access to the NetApp ONTAP Filer's TCP 80 port for HTTP connection and TCP 443 port for HTTPS connection. .SH SEE ALSO \fBlsmcli\fR(1), \fBlsmd\fR(1) .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR .br Tony Asleson \fI\fR libstoragemgmt-1.2.3/doc/man/lsmd.10000664000175000017500000000121112542455451014023 00000000000000.TH LSMD "1" "March 2013" "lsmd 1.2.3" "libStorageMgmt plug-in daemon" .SH NAME Daemon \- lsmd .SH DESCRIPTION libStorageMgmt plug\-in daemon. Plug-ins execute in their own process space for fault isolation and to accommodate different plug\-in licensing requirements. Runs as an unprivileged user. 
.SH OPTIONS \fB\-\-plugindir\fR = The directory where the plugins are located .HP \fB\-\-socketdir\fR = The directory for IPC sockets .HP \fB\-v\fR = Verbose logging .TP \fB\-d\fR = New style daemon (systemd) non-forking .SH BUGS Please report bugs to .SH AUTHOR Tony Asleson libstoragemgmt-1.2.3/doc/man/lsmd.1.in0000664000175000017500000000121512537546123014435 00000000000000.TH LSMD "1" "March 2013" "lsmd @VERSION@" "libStorageMgmt plug-in daemon" .SH NAME Daemon \- lsmd .SH DESCRIPTION libStorageMgmt plug\-in daemon. Plug-ins execute in their own process space for fault isolation and to accommodate different plug\-in licensing requirements. Runs as an unprivileged user. .SH OPTIONS \fB\-\-plugindir\fR = The directory where the plugins are located .HP \fB\-\-socketdir\fR = The directory for IPC sockets .HP \fB\-v\fR = Verbose logging .TP \fB\-d\fR = New style daemon (systemd) non-forking .SH BUGS Please report bugs to .SH AUTHOR Tony Asleson libstoragemgmt-1.2.3/doc/man/hpsa_lsmplugin.1.in0000664000175000017500000000313512542267433016526 00000000000000.TH hpsa_lsmplugin "1" "March 2015" "hpsa_lsmplugin @VERSION@" "libStorageMgmt" .SH NAME hpsa_lsmplugin -- LibstorageMgmt HP SmartArray plugin .SH DESCRIPTION LibstorageMgmt hpsa plugin allows user to manage HP SmartArray via vendor tool \fBhpssacli\fR[1]. The 'hpsa_lsmplugin' executable file is for libStorageMgmt daemon to execute when client user specifies hpsa plugin in the URI. .SH URI To use this plugin, users should set their URI to this format: .nf \fBhpsa://\fR or \fBhpsa://?hpssacli=\fR .fi .TP \fBURI parameters\fR These URI parameters are supported by this plugin: .RS 7 .TP \fBhpssacli\fR The 'hpssacli' URI parameter is used to specified the path of hpssacli tool. By default, this plugin will try these paths used by hpssacli rpm: \fB/usr/sbin/hpssacli\fR and \fB/opt/hp/hpssacli/bld/hpssacli\fR. .SH ROOT PRIVILEGE This plugin requires both \fBlsmd\fR daemon and API client running as root user. 
Please check manpage \fIlsmd.conf (5)\fR for detail. .SH SUPPORTED HARDWARES Please refer to HP website for hardware support status of hpssacli. Detailed support status can be queried via: * \fBlsm.Client.capabilities()\fR (Python API) * \fBlsm_capabilities()\fR (C API) * \fBlsmcli capabilities\fR (lsmcli command line). .SH FIREWALL RULES This plugin only execute \fBhpssacli\fR on localhost. No network connection required. .SH SEE ALSO \fIlsmcli\fR(1), \fIlsmd\fR(1), [1]http://downloads.linux.hp.com/SDR/project/spp/ .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR libstoragemgmt-1.2.3/doc/man/nstor_lsmplugin.1.in0000664000175000017500000000332312537546123016737 00000000000000.TH nstor_lsmplugin "1" "June 2015" "nstor_lsmplugin @VERSION@" "libStorageMgmt" .SH NAME nstor_lsmplugin -- LibstorageMgmt nstor plugin .SH DESCRIPTION LibstorageMgmt nstor plugin allows user to manage NexentaStor 3.x storage software [1]. The 'nstor_lsmplugin' executable file is for libStorageMgmt daemon to execute when client user specifies nstor plugin in the URI. .SH URI To use this plugin, users should set their URI to this format: .nf # HTTP connection \fBnstor://@:\fR # HTTPS connection \fBnstor+ssl://@:\fR .fi .TP \fBusername\fR The \fBusername\fR is the user account with 'Can_use_restapi' permission. .TP \fBnstor_server\fR The \fBnstor_server\fR is IP address or DNS name of NexentaStor server. .TP \fBport number\fR The \fBport number\fR is the listening port of the nexenta server REST API. The default port of 2000 is used if none is supplied on the URI. For non-default ports, SELinux will likely prevent the SMI-S plugin from making a network connection. .TP \fBURI parameters\fR No additional URI parameters are supported by this plugin. .SH SUPPORTED SOFTWARE NexentaStor 3.x is supported. Detailed support status can be queried via: * \fBlsm.Client.capabilities()\fR (Python API) * \fBlsm_capabilities()\fR (C API) * \fBlsmcli capabilities\fR (lsmcli command line). 
.SH FIREWALL RULES By default, this plugin requires the access to the nstor array's TCP 2000 port. .SH SEE ALSO \fBlsmcli\fR(1), \fBlsmd\fR(1), [1] http://nexentastor.org .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR .br Tony Asleson \fI\fR libstoragemgmt-1.2.3/doc/man/simc_lsmplugin.10000664000175000017500000000241512542455451016120 00000000000000.TH simc_lsmplugin "1" "June 2015" "simc_lsmplugin 1.2.3" "libStorageMgmt" .SH NAME simc_lsmplugin -- LibstorageMgmt Simulator C Plugin .SH DESCRIPTION LibStorageMgmt simulator C plugin is for development use. The plugin simulates an array which supports most features of the library. The simulator is memory based, state will be discarded once the plugin exits. The 'simc_lsmplugin' executable file is for the libStorageMgmt daemon to execute when client user specifies simc plugin in the URI. Since every command of lsmcli is a standalone libStorageMgmt session, this plugin is essentially useless for this purpose. In this use case, the libStorageMgmt simulator plugin \fBsim_lsmplugin(1)\fR is suggested. This plugin is written in pure C and is intended to be an example of a C plugin for the library. .SH URI To use this plugin, users should set their URI to this format: .nf simc:// .fi No password is required for this plugin. No URI parameters are supported by this plugin. .SH FIREWALL RULES This plugin requires not network access. .SH SEE ALSO \fBlsmcli\fR(1), \fBlsmd\fR(1), \fBsim_lsmplugin\fR(1) .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR .br Tony Asleson \fI\fR libstoragemgmt-1.2.3/doc/man/smispy_lsmplugin.10000664000175000017500000000544312542455451016515 00000000000000.TH smispy_lsmplugin "1" "June 2015" "smispy_lsmplugin 1.2.3" "libStorageMgmt" .SH NAME smispy_lsmplugin -- LibstorageMgmt SMI-S Plugin .SH DESCRIPTION LibstorageMgmt SMI-S Plugin allows LibstorageMgmt user to manage storage arrays like: EMC VNX/VMAX, HDS AMS and LSI MegaRAID etc. via SNIA SMI-S standard. 
The 'smispy_lsmplugin' executable file is for LibstorageMgmt daemon to execute when client user requests SMI-S plugin. .SH URI To use this plugin, users should set their URI to this format: .nf # HTTP connection \fBsmispy://@:?\fR # HTTPS connection \fBsmispy+ssl://@:?\fR .fi .TP \fBusername\fR The \fBusername\fR is the SMI-S provider user account. .TP \fBsmis_provder_host\fR The \fBsmis_provder_host\fR is the SMI-S provider's IP address or DNS name. .TP \fBport\fR By default, SMI-S plugin will use port \fB5988\fR for HTTP connection and port \fB5989\fR for HTTPS connection. You can skip this in URI if your SMI-S provider is using default ports. For non-default ports, SELinux will likely prevent the SMI-S plugin from making a network connection. .TP \fBURI parameters\fR These URI parameters are supported by this plugin: .RS 7 .TP \fBnamespace=\fR This URI parameter is for LSI MegaRAID and NetApp E-Series only. .TP \fBsystem=\fR For those SMI-S providers which support multi-systems(Example EMC ECIM), this URI parameter can be used to limit network communication for specified system only. The \fBsystem_id\fR is the ID of the system: * \fBlsm.System.id\fR (Python API) * \fBlsm_system_id_get()\fR (C API) * \fBlsmcli list --systems\fR (lsmcli command line) .TP \fBno_ssl_verify=yes\fR This URI parameter is for HTTPS connections only. With this URI parameter, the SMI-S plugin will not validate SMI-S provider's server SSL certificate. It's often used for self-signed CA environment, but it's strongly suggested to remove this URI parameter and install self-signed CA properly. .SH Supported Hardware The LibstorageMgmt SMI-S plugin is based on 'Block Services Package' profile , SNIA SMI-S 1.4 or later. Any storage system which implements that profile may be supported. Detailed support status can be queried via: * \fBlsm.Client.capabilities()\fR (Python API) * \fBlsm_capabilities()\fR (C API) * \fBlsmcli capabilities\fR (lsmcli command line). 
.SH FIREWALL RULES By default, this plugin requires access to the SMI-S provider's TCP 5988 port for HTTP conenction and TCP 5989 port for HTTPS connection. .SH SEE ALSO \fBlsmcli\fR(1), \fBlsmd\fR(1) .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR .br Tony Asleson \fI\fR libstoragemgmt-1.2.3/doc/man/megaraid_lsmplugin.1.in0000664000175000017500000000421712542267433017346 00000000000000.TH megaraid_lsmplugin "1" "June 2015" "megaraid_lsmplugin @VERSION@" "libStorageMgmt" .SH NAME megaraid_lsmplugin -- LibstorageMgmt MegaRAID plugin .SH DESCRIPTION LibstorageMgmt megaraid plugin allows user to manage LSI MegaRAID via vendor tool \fBstorcli\fR[1] or Dell PERC via \fBperccli\fR[2]. The 'megaraid_lsmplugin' executable file is for libStorageMgmt daemon to execute when client user specifies megaraid plugin in the URI. Extra SELinux actions are required to allowing plugin access the hardware. When both storcli and percli is installed, the plugin will use storcli by default even that will result in no system found on Dell PERC server. In this case, please remove storcli. Please be noted, current volume ID is just for temperate use, we will change to use volume VPD83 as volume ID once vendor provide so. .SH URI To use this plugin, users should set their URI to this format: .nf \fBmegaraid://\fR or \fBmegaraid://?storcli=\fR .fi .TP \fBURI parameters\fR These URI parameters are supported by this plugin: .RS 7 .TP \fBstorcli\fR The 'storcli' URI parameter is used to specified the path of storcli/perccli tool. By default, this plugin will try these paths: storcli rpm: \fB/opt/MegaRAID/storcli/storcli64\fR, \fB/opt/MegaRAID/storcli/storcli\fR perccli rpm: \fB/opt/MegaRAID/perccli/perccli64\fR, \fB/opt/MegaRAID/perccli/perccli\fR .SH ROOT PRIVILEGE This plugin requires both \fBlsmd\fR daemon and API client running as root user. Please check manpage \fIlsmd.conf (5)\fR for detail. 
.SH SUPPORTED HARDWARES Please refer to LSI or Dell website for hardware support status of storcli/perccli. Detailed support status can be queried via: * \fBlsm.Client.capabilities()\fR (Python API) * \fBlsm_capabilities()\fR (C API) * \fBlsmcli capabilities\fR (lsmcli command line). .SH FIREWALL RULES This plugin only execute \fBstorcli\fR on localhost. No network connection required. .SH SEE ALSO \fIlsmcli\fR(1), \fIlsmd\fR(1), [1] http://www.lsi.com, [2] http://www.dell.com .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR libstoragemgmt-1.2.3/doc/man/Makefile.am0000664000175000017500000000052512537546123015046 00000000000000notrans_dist_man1_MANS = lsmcli.1 lsmd.1 \ sim_lsmplugin.1 \ simc_lsmplugin.1 \ smispy_lsmplugin.1 \ ontap_lsmplugin.1 \ targetd_lsmplugin.1 \ nstor_lsmplugin.1 if WITH_MEGARAID notrans_dist_man1_MANS += megaraid_lsmplugin.1 endif if WITH_HPSA notrans_dist_man1_MANS += hpsa_lsmplugin.1 endif notrans_dist_man5_MANS = lsmd.conf.5.in libstoragemgmt-1.2.3/doc/man/sim_lsmplugin.10000664000175000017500000000245112542455451015755 00000000000000.TH sim_lsmplugin "1" "June 2015" "sim_lsmplugin 1.2.3" "libStorageMgmt" .SH NAME sim_lsmplugin -- LibStorageMgmt simulator python plugin .SH DESCRIPTION LibstorageMgmt simulator plugin is for development or test use only. The plugin simulates an array which supports most features of the library. State is stored in a file, default is '/tmp/lsm_sim_data'. The 'sim_lsmplugin' executable file is for libStorageMgmt daemon to execute when client user specifies sim plugin in the URI. This plugin is written in pure Python and is intended to be an example of a python plugin for the library. .SH URI To use this plugin, users should set their URI to this format: .nf # All that is required sim:// # Optional statefile sim://?statefile= .fi No password is required for this plugin. .TP \fBURI parameters\fR .RS 7 .TP \fBstatefile\fR Use specified file to store simulator state data. 
Example URI: .nf \fBsim://?statefile=/tmp/other_lsm_sim_data\fR .fi The statefile is a sqlite3 data base file. .SH FIREWALL RULES This plugin requires not network access. .SH SEE ALSO \fBlsmcli\fR(1), \fBlsmd\fR(1) .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR .br Tony Asleson \fI\fR libstoragemgmt-1.2.3/doc/man/lsmcli.1.in0000664000175000017500000005326512537546123014775 00000000000000.TH LSMCLI "1" "September 2014" "lsmcli @VERSION@" "libStorageMgmt" .SH NAME lsmcli - libStorageMgmt command line interface .SH SYNOPSIS .B lsmcli \fIcommand\fR [\fIGLOBAL OPTIONS\fR]...[\fICOMMAND OPTIONS\fR]... .SH DESCRIPTION lsmcli is the command line tool for the libStorageMgmt library. This tool allows users to do one off storage related management operations or to script management of their storage. .SH PREREQUISITES .TP 8 * libStorageMgmt daemon. The daemon '\fBlsmd\fR' is required by lsmcli. .TP 8 * URI(Uniform Resource Identifier) URI is used to identify which plugin to use and how the plugin should communicate with the storage array. The valid URI format is: .RS 12 .nf \fBplugin://@host:?\fR .br \fBplugin+ssl://@host:?\fR .fi .RE 1 .RS 8 Examples, please refer to "LibStorageMgmt User Guide" for more details: .RE 1 .RS 12 * Simulator: .RS 4 \fBsim://\fR .br \fBsimc://\fR .RE 1 .RS 12 * NetApp ONTAP: .RS 4 \fBontap://username@host\fR .br \fBontap+ssl://username@host\fR .RE 1 .RS 12 * SMI-S supported arrays (eg. EMC CX/VNX, HDS AMS, IBM SVC/DS, LSI MegaRAID and others): .RS 4 \fBsmis://username@host:?namespace=\fR .br \fBsmis+ssl://username@host:?namespace=\fR .RE 1 .RS 8 You can pass URI to lsmcli via one of these methods: .RS 4 * Using '\fB-u\fR, \fB--uri\fR' argument. .br * Using '\fBLSMCLI_URI\fR' environment variable. 
.br * Add this line into \fB$HOME/.lsmcli\fR: .RS 4 uri=\fI\fR .RE 1 .TP 8 * Password For storage array password authentication you can pass it to lsmcli via one of the following methods: .RS 12 * '\fB-P\fR, \fB--prompt\fR' argument to prompt for password. .br * '\fBLSMCLI_PASSWORD\fR' environment variable. .RE 1 .SH "GLOBAL OPTIONS" .TP 15 \fB--version\fR Show program's version number and exit .TP 15 \fB-h\fR, \fB--help\fR Show this help message and exit. Will show help message of specific command if specified. .TP 15 \fB-u\fR \fI\fR, \fB--uri\fR \fI\fR Uniform Resource Identifier (env LSMCLI_URI) .TP 15 \fB-P\fR, \fB--prompt\fR Prompt for password (env LSMCLI_PASSWORD) .TP 15 \fB-H\fR, \fB--human\fR Print sizes in human readable format (e.g., KiB, MiB, GiB, TiB, PiB, EiB) .TP 15 \fB-t\fR \fI\fR, \fB--terse\fR \fI\fR Print output in terse form with "SEP" as a record separator without header unless '--header' defined. .TP 15 \fB--header\fR Include the header with terse .TP 15 \fB-e\fR, \fB--enum\fR Display enumerated types as numbers instead of text .TP 15 \fB-f\fR, \fB--force\fR Bypass confirmation prompt for data loss operations .TP 15 \fB-w\fR \fI\fR, \fB--wait\fR=\fI\fR Command timeout value in ms (default = 30s) .TP 15 \fB-b\fR Run the command asynchronously instead of waiting for completion. The lsmcli command will exit with exit code(7) and job id will be written to STDOUT when a command is still executing on the storage array. Use 'job-status --id \fI\fR' to inquire on the progress of the command. Some arrays or plugins might not support asynchronous operations, in those circumstances, \fb-b\fR will be ineffective. Command will wait until finished. .TP 15 \fB-s\fR, \fB--script\fR Displaying data in script friendly way. .br Without this option, data is displayed in this manner (default): ID | Name | Element Type ... ------------------------------------------ ... aggr0 | aggr0 | FS,SYSTEM_RESERVED,POOL ... iscsi | iscsi | FS,POOL ... 
With this option, data is displayed in this manner. ----------------------------------------------- ID | aggr0 Name | aggr0 Element Type | FS,SYSTEM_RESERVED,POOL ... ----------------------------------------------- ID | iscsi Name | iscsi Element Type | FS,POOL ... Please note: .br To reduce the width of output, NOT all properties will be displayed in default column display. .SH COMMANDS .SS list List information on LSM objects .TP 15 \fB--type\fR \fI\fR Required. Valid values are (case insensitive): .br \fBVOLUMES\fR, \fBPOOLS\fR, \fBFS\fR, \fBSNAPSHOTS\fR, \fBEXPORTS\fR, \fBNFS_CLIENT_AUTH\fR, .br \fBACCESS_GROUPS\fR, \fBSYSTEMS\fR, \fBDISKS\fR, \fBPLUGINS\fR, \fBTARGET_PORTS\fR. .TP \fB--fs\fR \fI\fR Required for \fB--type\fR=\fBSNAPSHOTS\fR. List the snapshots of certain filesystem. PLUGINS will list all supported plugins of LSM, not only the current one. .TP \fB--sys\fR \fI\fR Search resources from system with SYS_ID. Only supported when querying these types of resources: \fBVOLUMES\fR, \fBPOOLS\fR, \fBFS\fR, \fBSNAPSHOTS\fR, \fBDISKS\fR, \fBACCESS_GROUPS\fR. .TP \fB--pool\fR \fI\fR Search resources from pool with POOL_ID. Only supported by these types of resources: \fBVOLUMES\fR, \fBPOOLS\fR, \fBFS\fR. .TP \fB--vol\fR \fI\fR Search resources from volume with VOL_ID. Only supported by these types of resources: \fBVOLUMES\fR, \fBACCESS_GROUPS\fR. .br To query volume masking status, please use this command: lsmcli list --type ACCESS_GROUPS --vol .TP \fB--disk\fR \fI\fR Search resources from disk with DISK_ID. Only supported by these types of resources: \fBDISK\fR. .TP \fB--ag\fR \fI\fR Search resources from access group with AG_ID. Only supported by these types of resources: \fBACCESS_GROUPS\fR, \fBVOLUMES\fR. .br To query volume masking status, please use this command: lsmcli list --type VOLUMES --ag .TP \fB--fs\fR \fI\fR Search resources from file system with FS_ID. Only supported by these types of resources: \fBFS\fR. 
.TP \fB--nfs-export\fR \fI\fR Search resources from NFS export with NFS_EXPORT_ID. Only supported by these types of resources: \fBEXPORTS\fR. .TP \fB--tgt\fR \fI\fR Search resources from target port with target port ID. Only supported by these types of resources: \fBTARGET_PORTS\fR. .SS job-status Retrieve information about a job. Please see user guide on how to use. .TP 15 \fB--job\fR \fI\fR .SS capabilities Retrieves array capabilities. .TP 15 \fB--sys\fR \fI\fR Required. ID of the system to query for capabilities. .SS plugin-info Retrieves plugin description and version for current URI. .SS volume-create Creates a volume (AKA., logical volume, virtual disk, LUN). .TP 15 \fB--name\fR \fI\fR Required. Volume name. .TP \fB--size\fR \fI\fR Required. Volume size (See \fBSIZE OPTION\fR for allowed formats). .TP \fB--pool\fR \fI\fR Required. ID of pool. .TP \fB--provisioning\fR \fI\fR Optional. Provisioning type. Valid values are: DEFAULT, THIN, FULL. \fBDEFAULT\fR means let plugin choose. \fBTHIN\fR means requiring a Thin Provisioning enabled volume. \fBFULL\fR means requiring a fully allocated volume. .SS volume-raid-create Creates a volume on hardware RAID on given disks. .TP 15 \fB--name\fR \fI\fR Required. Volume name. Might be altered or ignored due to hardware RAID card vendor limitation. .TP \fB--raid-type\fR \fI\fR Required. Could be one of these values: \fBRAID0\fR, \fBRAID1\fR, \fBRAID5\fR, \fBRAID6\fR, \fBRAID10\fR, \fBRAID50\fR, \fBRAID60\fR. The supported RAID types of current RAID card could be queried via command "\fBvolume-raid-create-cap\fR". .TP \fB--disk\fR \fI\fR Required. Repeatable. The disk ID for new RAID group. .TP \fB--strip-size\fR \fI\fR Optional. The size in bytes of strip on each disks. If not defined, will let hardware card to use the vendor default value. The supported stripe size of current RAID card could be queried via command "\fBvolume-raid-create-cap\fR". 
.SS volume-raid-create-cap Query support status of volume-raid-create command for current hardware RAID card. .TP 15 \fB--sys\fR \fI\fR Required. ID of the system to query for capabilities. .SS volume-delete .TP 15 Delete a volume given its ID .TP \fB--vol\fR \fI\fR Required. The ID of volume to delete. .SS volume-resize .TP 15 Re-sizes a volume, requires: .TP \fB--vol\fR \fI\fR Required. The ID of volume to resize. .TP \fB--size\fR \fI\fR Required. The new size of volume. (See \fBSIZE OPTION\fR for allowed formats). Due to boundary alignment concerns, the array might return a volume with slightly bigger size than requested. .SS volume-replicate Creates a new volume and replicates provided volume to it. .TP 15 \fB--vol\fR \fI\fR Required. The ID of volume to replicate. .TP \fB--name\fR \fI\fR Required. The name for new volume to hold replicated data. .TP \fB--rep-type\fR \fI\fR (see \fBVOLUME REPLICATION TYPES)\fR Required. Valid types of replication are: .br \fBCLONE\fR, \fBCOPY\fR, \fBMIRROR_ASYNC\fR, \fBMIRROR_SYNC\fR. .TP \fB--pool\fR \fI\fR Optional. The ID of pool where the new volume should be created from. If not specified, plugin or array will choose appropriate pool. .SS volume-replicate-range Replicates a portion of a volume to the same volume or to a different volume. .TP 15 \fB--src-vol\fR \fI\fR Required. The ID of replication source volume. .TP \fB--dst-vol\fR \fI\fR Required. The ID of replication destination volume. .TP \fB--rep-type\fR \fI\fR (see \fBVOLUME REPLICATION TYPES)\fR Required. Appropriate types of replication are: .br \fBCLONE\fR, \fBCOPY\fR. .TP \fB--src-start\fR \fI\fR Required. Replication source volume start block number. Must be paired with \fB--count\fR and \fB--dst-start\fR. If you have several non-contiguous block ranges, you can repeatedly define this argument, like '\fB--src-start 0 --dst-start 0 --count 1024 --src-start 2048 --dst-start 2048 --count 2048\fR' .TP \fB--dst-start\fR \fI\fR Required. 
Replication destination volume start block number. Must be paired with \fB--count\fR and \fB--src-start\fR. .TP \fB--count\fR \fI\fR Required. The count of replicated blocks starting from the \fB--src-start\fR block. Must be paired with \fB--src-start\fR and \fB--dst-start\fR. .SS volume-replicate-range-block-size Size of each replicated block on a system in bytes. .TP 15 \fB--sys\fR \fI\fR Required. ID of the system to query for replicated block size. .SS volume-dependants Returns True if volume has a dependent child, like replication. .TP 15 \fB--vol\fR \fI\fR Required. The ID of volume to query dependency. .SS volume-dependants-rm Removes volume dependencies (like replication). .TP 15 \fB--vol\fR \fI\fR Required. The ID of volume to remove dependency. .SS volume-access-group Lists the access group(s) that have access to the provided volume. .TP 15 \fB--vol\fR \fI\fR Required. The ID of volume to query access. .SS volume-mask .TP 15 Grant access group RW access to certain volume. Like LUN masking or NFS export. .TP \fB--vol\fR \fI\fR Required. The ID of volume to access. .TP \fB--ag\fR \fI\fR Required. The ID of access group to grant. .SS volume-unmask .TP 15 Revoke access group RW access to specified volume. .TP \fB--vol\fR \fI\fR Required. The ID of volume to revoke. .TP \fB--ag\fR \fI\fR Required. The ID of access group to revoke. .SS volume-raid-info .TP 15 Query RAID information for given volume. .TP \fB--vol\fR \fI\fR Required. The ID of volume to query. .SS pool-member-info .TP 15 Query RAID information for given pool. .TP \fB--pool\fR \fI\fR Required. The ID of pool to query. .SS access-group-create .TP 15 Create an access group. .TP \fB--name\fR \fI\fR Required. The human friendly name for new access group. .TP \fB--init\fR \fI\fR Required. The first initiator ID of new access group. WWPN or iSCSI IQN. .TP \fB--sys\fR \fI\fR Required. The ID of the system where this access group should reside. .SS access-group-add Adds an initiator to an access group. 
.TP 15 \fB--ag\fR \fI\fR Required. ID of access group. .TP \fB--init\fR \fI\fR Required. ID of initiator to add. WWPN or iSCSI IQN. .SS access-group-remove Removes an initiator from an access group. .TP 15 \fB--ag\fR \fI\fR Required. ID of access group. .TP \fB--init\fR \fI\fR Required. ID of initiator to remove. .SS access-group-delete Delete an access group. .TP 15 \fB--ag\fR \fI\fR Required. ID of access group to delete. .SS access-group-volumes Lists the volumes that the access group has been granted access to. .TP 15 \fB--ag\fR \fI\fR Required. The ID of access group to query. .SS iscsi-chap Configures ISCSI inbound/outbound CHAP authentication. .TP 15 \fB--init\fR \fI\fR Required. The ID of iSCSI initiator to configure. .TP \fB--in-user\fR \fI\fR Optional. Inbound CHAP user name. .TP \fB--in-pass\fR \fI\fR Optional. Inbound CHAP password. .TP \fB--out-user\fR \fI\fR Optional. Outbound CHAP user name. .TP \fB--out-pass\fR \fI\fR Optional. Outbound CHAP password. .SS fs-create Creates a filesystem. .TP 15 \fB--name\fR \fI\fR Required. Human friendly name for new filesystem. .TP \fB--size\fR \fI\fR Required. Volume size(See \fBSIZE OPTION\fR for allowed formats). .TP \fB--pool\fR \fI\fR Required. ID of pool to hold the new filesystem. .SS fs-delete Delete a filesystem. .TP 15 \fB--fs\fR \fI\fR Required. ID of the filesystem to delete. .SS fs-resize Resizes a filesystem. .TP 15 \fB--fs\fR \fI\fR Required. ID of the filesystem to resize. .TP \fB--size\fR \fI\fR Required. New size of filesystem. See \fBSIZE OPTION\fR for allowed formats. .SS fs-export Export a filesystem via NFS. .TP 15 \fB--fs\fR \fI\fR Required. ID of the filesystem to export. .TP \fB--exportpath\fR \fI\fR Optional. NFS server export path. e.g. '/foo/bar'. .TP \fB--anonuid\fR \fI\fR Optional. The UID(User ID) to map to anonymous user. .TP \fB--anongid\fR \fI\fR Optional. The GID(Group ID) to map to anonymous user. .TP \fB--auth-type\fR \fI\fR Optional. NFS client authentication type. 
This is just a place holder, not supported yet. .TP \fB--root-host\fR \fI\fR Optional. Repeatable. The host/IP has root access. For two or more hosts/IPs: '\fB--root-host hostA --root-host hostB\fR'. .TP \fB--ro-host\fR \fI\fR Optional. Repeatable. The host/IP has read only access. For two or more hosts/IPs: '\fB--ro-host hostA --ro-host hostB\fR'. .TP \fB--rw-host\fR \fI\fR Optional. The host/IP has read/write access. For two or more hosts/IPs: '\fB--rw-host hostA --rw-host hostB\fR'. .SS fs-unexport Remove an NFS export. .TP 15 \fB--fs\fR \fI\fR Required. ID of the filesystem to unexport. .SS fs-clone Creates a file system clone. The 'clone' means point in time read writeable space efficient copy of data, AKA. read-writable snapshot. .TP 15 \fB--src-fs\fR \fI\fR Required. The ID of the filesystem to clone. .TP \fB--dst-name\fR \fI\fR Required. The name for newly created destination file system. .TP \fB--backing-snapshot\fR \fI\fR Optional. Make a FS clone using a previously created snapshot. .SS fs-snap-create Creates a snapshot of specified filesystem. A snapshot is defined as a read only space efficient point in time copy (PIT) of a filesystem. The source filesystem remains modifiable. .TP 15 \fB--name\fR \fI\fR Required. The human friendly name of new snapshot. .TP \fB--fs\fR \fI\fR Required. The ID of filesystem to create snapshot against. .SS fs-snap-delete Deletes a snapshot. .TP 15 \fB--snap\fR \fI\fR Required. The ID of snapshot to delete. .TP \fB--fs\fR \fI\fR Required. The ID of filesystem. .SS fs-snap-restore Restores a FS or specified files to previous snapshot state. This will discard all the changes to filesystem since snapshot if specific files are not specified in restore. .TP 15 \fB--fs\fR \fI\fR Required. The ID of filesystem to restore. .TP \fB--snap\fR \fI\fR Required. The ID of snapshot to restore. .TP \fB--file\fR \fI\fR Optional. Repeatable. With this option defined, will only restore the defined file(s). '\fB--file fileA --file pathB\fR'. 
.TP \fB--fileas\fR \fI\fR Optional. Repeatable. With this option defined, the restored file will be saved to specified path and filename, eg. '\fB--file fileA --fileas old_fileA \fR'. .SS fs-dependants Returns True if a child dependency (snapshot or clone) exists. .TP 15 \fB--fs\fR \fI\fR Required. The ID of filesystem to query. .TP \fB--file\fR \fI\fR Optional. Repeatable. Only check for dependencies on specific file(s), eg. '\fB--file fileA --file pathB\fR'. .SS fs-dependants-rm Removes filesystem dependencies(snapshot or clone). .TP 15 \fB--fs\fR \fI\fR Required. The ID of filesystem to remove dependency. .TP \fB--file\fR \fI\fR Optional. Repeatable. Only remove dependencies on specific file(s), eg. '\fB--file fileA --file pathB\fR'. .SS file-clone Creates a clone of a file (thin provisioned). Note: --src and --dst need to be paired eg. '\fB--src fileA --src fileB --dst fileA_clone --dst fileB_clone\fR'. .TP 15 \fB--src\fR \fI\fR Required. Repeatable. Source file to clone (relative path). .TP \fB--dst\fR \fI\fR Required. Repeatable. Destination file for clone (relative path). .IP .SH ALIAS .SS ls Alias of 'list --type systems' .SS lp Alias of 'list --type pools' .SS lv Alias of 'list --type volumes' .SS ld Alias of 'list --type disks' .SS la Alias of 'list --type access_groups' .SS lf Alias of 'list --type fs' .SS lt Alias of 'list --type target_ports' .SS c Alias of 'capabilities' .SS p Alias of 'plugin-info' .SS vc Alias of 'volume-create' .SS vrc Alias of 'volume-raid-create' .SS vrcc Alias of 'volume-raid-create-cap' .SS vd Alias of 'volume-delete' .SS vr Alias of 'volume-resize' .SS vm Alias of 'volume-mask' .SS vu Alias of 'volume-unmask' .SS vri Alias of 'volume-raid-info' .SS pmi Alias of 'pool-member-info' .SS ac Alias of 'access-group-create' .SS aa Alias of 'access-group-add' .SS ar Alias of 'access-group-remove' .SS ad Alias of 'access-group-delete' .IP .SH SIZE OPTION .SS \fB--size\fR \fI\fI\fR\fR Storage space size. Format is '\fI\fR' + '\fI\fR'. 
Example: "10GiB", "20.5MB". No postfix indicates bytes. Valid prefixes are: KiB, # 2^10 Bytes MiB, # 2^20 Bytes GiB, # 2^30 Bytes TiB, # 2^40 Bytes PiB, # 2^50 Bytes EiB, # 2^60 Bytes KB, # 10^3 Bytes MB, # 10^6 Bytes GB, # 10^9 Bytes TB, # 10^12 Bytes PB, # 10^15 Bytes EB, # 10^18 Bytes These prefixes are supported also, but not recommended: K, M, G, T, P, E, # equal to KiB, MiB, etc. k, m, g, t, p, e, # equal to KiB, MiB, etc. .SH FILES .TP 15 ~/.lsmcli lsmcli configuration file, containing name-value pairs separated by '='. The only currently supported configuration option is 'uri', such as 'uri=ontap://user@filer.example.com'. Configuration options in .lsmcli are only used if not overridden by command-line option or environment variable. .SH EXAMPLES (command output omitted for brevity) .TP 15 Simulator, list pools (no password required) $ lsmcli -u sim:// -l POOLS .TP 15 NetApp, list volumes (prompting for password) $ lsmcli -u ontap://root@host/ -l VOLUMES -P .TP 15 SMI-S, list systems (prompting for password) .nf $ lsmcli -u smispy://username@host:5988/?namespace=root/interop \\ -l SYSTEMS -P .fi .TP 15 Targetd, list pools (using env variables for URI and password) $ export LSMCLI_URI=targetd://username@host:18700 .br $ export LSMCLI_PASSWORD=\fI\fR .br $ lsmcli -l POOLS .TP 15 NexentaStor, create volume (using environment variables for URI and password) .nf $ export LSMCLI_URI='nstor://user@host' $ export LSMCLI_PASSWORD=\fI\fR $ lsmcli volume-create --name volume_name --size 1TiB --pool default .fi .TP 15 SMI-S, create volume (using environment variables for URI and password) .nf $ export LSMCLI_URI='smispy+ssl://user@host:5989?namespace=root/emc' $ export LSMCLI_PASSWORD=\fI\fR $ lsmcli volume-create --name volume_name --size 1TiB --pool default .fi .SH ENVIRONMENT .TP 17 LSMCLI_URI The URI for the storage array in question. .TP 17 LSMCLI_PASSWORD The password to use for the array. 
.SH VOLUME REPLICATION TYPES .TP 17 CLONE A point in time, read writeable, space efficient copy of data. .TP 17 COPY A full bitwise copy of the data. It occupies the full space. .TP 17 MIRROR_SYNC Continuously updated, realtime with both copies having identical data. .TP 17 MIRROR_ASYNC Continuously updated, with a varying amount of delay and data delta between the source and target. .SH NOTES .TP 8 Plugin installation Plugins are installed individually except for the simulators which are always included. .TP 8 Secure sockets layer (SSL) All of the plugins (except the simulator) support SSL when communicating from the plugin to the array. This is accomplished by adding "+ssl" to the plugin and usually by selecting a different port number from non-SSL communications. .br .nf $ lsmcli -u smispy+ssl://username@host:5989/?namespace=interop \\ list --type SYSTEMS -P .fi .TP 8 SSL error: certificate verify failed When using SMI-S plugin with SSL against self-signed SMI-S provider, lsmcli normally quits with '\fBSSL error: certificate verify failed\fR'. Please contact SMI-S provider support to set up the self-signed certificate in your system. If you prefer to bypass the certificate check, add 'no_ssl_verify=yes' at the end of URI, for example: .RS 12 .nf \fBsmispy+ssl://admin@emc-smi:5989?namespace=root/emc&no_ssl_verify=yes\fR .fi .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Tony Asleson \fI\fR .br Gris Ge \fI\fR libstoragemgmt-1.2.3/doc/man/targetd_lsmplugin.1.in0000664000175000017500000000327312537546123017230 00000000000000.TH targetd_lsmplugin "1" "June 2015" "targetd_lsmplugin @VERSION@" "libStorageMgmt" .SH NAME targetd_lsmplugin -- libStorageMgmt targetd plugin .SH DESCRIPTION LibStorageMgmt targetd plugin allows user to manage storage using the targetd storage API [1]. The 'targetd_lsmplugin' executable file is for libStorageMgmt daemon to execute when client user specifies targetd plugin in the URI. 
.SH URI To use this plugin, users should set their URI to this format: .nf # HTTP connection \fBtargetd://@:\fR # HTTPS connection \fBtargetd+ssl://@:\fR .fi .TP \fBusername\fR The \fBusername\fR is the user account configured in targetd configuration file. .TP \fBtargetd_server\fR The \fBtargetd_server\fR is the IP address or DNS name of server running targetd daemon. .TP \fBport number\fR The \fBport number\fR is the listening port of the targetd daemon. The default port of 18700 is used if none is supplied on the URI. .TP \fBURI parameters\fR No additional URI parameters are supported by this plugin. .SH SUPPORTED SOFTWARE Linux targetd 0.7.1 or later version. Detailed support status can be queried via: * \fBlsm.Client.capabilities()\fR (Python API) * \fBlsm_capabilities()\fR (C API) * \fBlsmcli capabilities\fR (lsmcli command line). .SH FIREWALL RULES By default, this plugin requires access to the targetd server's TCP 18700 port. .SH SEE ALSO \fBlsmcli\fR(1), \fBlsmd\fR(1), [1] https://github.com/agrover/targetd .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR .br Tony Asleson \fI\fR libstoragemgmt-1.2.3/doc/man/Makefile.in0000664000175000017500000004711212542455445015064 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @WITH_MEGARAID_TRUE@am__append_1 = megaraid_lsmplugin.1 @WITH_HPSA_TRUE@am__append_2 = hpsa_lsmplugin.1 subdir = doc/man DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(srcdir)/lsmcli.1.in $(srcdir)/lsmd.1.in \ $(srcdir)/sim_lsmplugin.1.in $(srcdir)/simc_lsmplugin.1.in \ 
$(srcdir)/smispy_lsmplugin.1.in $(srcdir)/ontap_lsmplugin.1.in \ $(srcdir)/targetd_lsmplugin.1.in \ $(srcdir)/nstor_lsmplugin.1.in $(srcdir)/lsmd.conf.5.in \ $(srcdir)/megaraid_lsmplugin.1.in \ $(srcdir)/hpsa_lsmplugin.1.in $(notrans_dist_man1_MANS) \ $(notrans_dist_man5_MANS) ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = lsmcli.1 lsmd.1 sim_lsmplugin.1 simc_lsmplugin.1 \ smispy_lsmplugin.1 ontap_lsmplugin.1 targetd_lsmplugin.1 \ nstor_lsmplugin.1 lsmd.conf.5 megaraid_lsmplugin.1 \ hpsa_lsmplugin.1 CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' 
| \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } man1dir = $(mandir)/man1 am__installdirs = "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man5dir)" man5dir = $(mandir)/man5 NROFF = nroff MANS = $(notrans_dist_man1_MANS) $(notrans_dist_man5_MANS) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = 
@LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = 
@exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ notrans_dist_man1_MANS = lsmcli.1 lsmd.1 sim_lsmplugin.1 \ simc_lsmplugin.1 smispy_lsmplugin.1 ontap_lsmplugin.1 \ targetd_lsmplugin.1 nstor_lsmplugin.1 $(am__append_1) \ $(am__append_2) notrans_dist_man5_MANS = lsmd.conf.5.in all: all-am .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu doc/man/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu doc/man/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): lsmcli.1: $(top_builddir)/config.status $(srcdir)/lsmcli.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ lsmd.1: $(top_builddir)/config.status $(srcdir)/lsmd.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ sim_lsmplugin.1: $(top_builddir)/config.status $(srcdir)/sim_lsmplugin.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ simc_lsmplugin.1: $(top_builddir)/config.status $(srcdir)/simc_lsmplugin.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ smispy_lsmplugin.1: $(top_builddir)/config.status $(srcdir)/smispy_lsmplugin.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ ontap_lsmplugin.1: $(top_builddir)/config.status $(srcdir)/ontap_lsmplugin.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ targetd_lsmplugin.1: $(top_builddir)/config.status $(srcdir)/targetd_lsmplugin.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ nstor_lsmplugin.1: $(top_builddir)/config.status $(srcdir)/nstor_lsmplugin.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ lsmd.conf.5: $(top_builddir)/config.status $(srcdir)/lsmd.conf.5.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ megaraid_lsmplugin.1: $(top_builddir)/config.status $(srcdir)/megaraid_lsmplugin.1.in cd 
$(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ hpsa_lsmplugin.1: $(top_builddir)/config.status $(srcdir)/hpsa_lsmplugin.1.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-man1: $(notrans_dist_man1_MANS) @$(NORMAL_INSTALL) @list1='$(notrans_dist_man1_MANS)'; \ list2=''; \ test -n "$(man1dir)" \ && test -n "`echo $$list1$$list2`" \ || exit 0; \ echo " $(MKDIR_P) '$(DESTDIR)$(man1dir)'"; \ $(MKDIR_P) "$(DESTDIR)$(man1dir)" || exit 1; \ { for i in $$list1; do echo "$$i"; done; \ if test -n "$$list2"; then \ for i in $$list2; do echo "$$i"; done \ | sed -n '/\.1[a-z]*$$/p'; \ fi; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed 'n;s,.*/,,;p;s,\.[^1][0-9a-z]*$$,.1,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \ done; } uninstall-man1: @$(NORMAL_UNINSTALL) @list='$(notrans_dist_man1_MANS)'; test -n "$(man1dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ } | sed 's,.*/,,;s,\.[^1][0-9a-z]*$$,.1,'`; \ dir='$(DESTDIR)$(man1dir)'; $(am__uninstall_files_from_dir) install-man5: $(notrans_dist_man5_MANS) @$(NORMAL_INSTALL) @list1='$(notrans_dist_man5_MANS)'; \ list2=''; \ test -n "$(man5dir)" \ && test -n "`echo $$list1$$list2`" \ || exit 0; \ echo " $(MKDIR_P) '$(DESTDIR)$(man5dir)'"; \ $(MKDIR_P) "$(DESTDIR)$(man5dir)" || exit 1; \ { for i in $$list1; do echo "$$i"; done; \ if test -n "$$list2"; then \ for i in $$list2; do echo "$$i"; 
done \ | sed -n '/\.5[a-z]*$$/p'; \ fi; \ } | while read p; do \ if test -f $$p; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; echo "$$p"; \ done | \ sed 'n;s,.*/,,;p;s,\.[^5][0-9a-z]*$$,.5,' | \ sed 'N;N;s,\n, ,g' | { \ list=; while read file base inst; do \ if test "$$base" = "$$inst"; then list="$$list $$file"; else \ echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man5dir)/$$inst'"; \ $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man5dir)/$$inst" || exit $$?; \ fi; \ done; \ for i in $$list; do echo "$$i"; done | $(am__base_list) | \ while read files; do \ test -z "$$files" || { \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man5dir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(man5dir)" || exit $$?; }; \ done; } uninstall-man5: @$(NORMAL_UNINSTALL) @list='$(notrans_dist_man5_MANS)'; test -n "$(man5dir)" || exit 0; \ files=`{ for i in $$list; do echo "$$i"; done; \ } | sed 's,.*/,,;s,\.[^5][0-9a-z]*$$,.5,'`; \ dir='$(DESTDIR)$(man5dir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(MANS) installdirs: for dir in "$(DESTDIR)$(man1dir)" "$(DESTDIR)$(man5dir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-man install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-man1 install-man5 install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-man uninstall-man: uninstall-man1 uninstall-man5 .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-man1 install-man5 install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ uninstall-am uninstall-man uninstall-man1 uninstall-man5 # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: libstoragemgmt-1.2.3/doc/man/nstor_lsmplugin.10000664000175000017500000000331712542455451016334 00000000000000.TH nstor_lsmplugin "1" "June 2015" "nstor_lsmplugin 1.2.3" "libStorageMgmt" .SH NAME nstor_lsmplugin -- LibstorageMgmt nstor plugin .SH DESCRIPTION LibstorageMgmt nstor plugin allows user to manage NexentaStor 3.x storage software [1]. The 'nstor_lsmplugin' executable file is for libStorageMgmt daemon to execute when client user specifies nstor plugin in the URI. .SH URI To use this plugin, users should set their URI to this format: .nf # HTTP connection \fBnstor://@:\fR # HTTPS connection \fBnstor+ssl://@:\fR .fi .TP \fBusername\fR The \fBusername\fR is the user account with 'Can_use_restapi' permission. .TP \fBnstor_server\fR The \fBnstor_server\fR is IP address or DNS name of NexentaStor server. .TP \fBport number\fR The \fBport number\fR is the listening port of the nexenta server REST API. The default port of 2000 is used if none is supplied on the URI. For non-default ports, SELinux will likely prevent this plugin from making a network connection. .TP \fBURI parameters\fR No additional URI parameters are supported by this plugin. .SH SUPPORTED SOFTWARE NexentaStor 3.x is supported. Detailed support status can be queried via: * \fBlsm.Client.capabilities()\fR (Python API) * \fBlsm_capabilities()\fR (C API) * \fBlsmcli capabilities\fR (lsmcli command line). .SH FIREWALL RULES By default, this plugin requires the access to the nstor array's TCP 2000 port. .SH SEE ALSO \fBlsmcli\fR(1), \fBlsmd\fR(1), [1] http://nexentastor.org .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR .br Tony Asleson \fI\fR libstoragemgmt-1.2.3/doc/man/simc_lsmplugin.1.in0000664000175000017500000000242112537546123016523 00000000000000.TH simc_lsmplugin "1" "June 2015" "simc_lsmplugin @VERSION@" "libStorageMgmt" .SH NAME simc_lsmplugin -- LibstorageMgmt Simulator C Plugin .SH DESCRIPTION LibStorageMgmt simulator C plugin is for development use. 
The plugin simulates an array which supports most features of the library. The simulator is memory based, state will be discarded once the plugin exits. The 'simc_lsmplugin' executable file is for the libStorageMgmt daemon to execute when client user specifies simc plugin in the URI. Since every command of lsmcli is a standalone libStorageMgmt session, this plugin is essentially useless for this purpose. In this use case, the libStorageMgmt simulator plugin \fBsim_lsmplugin(1)\fR is suggested. This plugin is written in pure C and is intended to be an example of a C plugin for the library. .SH URI To use this plugin, users should set their URI to this format: .nf simc:// .fi No password is required for this plugin. No URI parameters are supported by this plugin. .SH FIREWALL RULES This plugin requires no network access. .SH SEE ALSO \fBlsmcli\fR(1), \fBlsmd\fR(1), \fBsim_lsmplugin\fR(1) .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR .br Tony Asleson \fI\fR libstoragemgmt-1.2.3/doc/man/lsmd.conf.5.in0000664000175000017500000000343212537546123015370 00000000000000.TH lsmd.conf "5" "January 2015" "lsmd.conf @VERSION@" "libStorageMgmt daemon config" .SH NAME lsmd.conf - libStorageMgmt daemon lsmd configuration file. .SH DESCRIPTION The libStorageMgmt plugin daemon (\fBlsmd\fR) will read the \fBlsmd.conf\fR file in the folder defined via \fB--confdir\fR (default is \fB/etc/lsm/\fR). The daemon reads the plugin configuration files from the sub-folder \fBpluginconf.d\fR. The plugin configuration file should be named as .conf, for example: * MegaRAID plugin (\fBmegaraid\fR://): \fBmegaraid.conf\fR The \fBlsmd.conf\fR file controls the global settings for \fBlsmd\fR while the plugin configuration file for each plugin controls individual plugin behavior. Each option line of the configuration file should contain a trailing semicolon(\fB;\fR). 
.SH lsmd.conf OPTIONS .TP \fBallow-plugin-root-privilege = true;\fR Indicates whether the \fBlsmd\fR daemon should keep running as root mode and invoke plugin as root user when needed. Without this option or with option set as \fBfalse\fR means that the daemon and the plugins will never run as root. Only when all the following requirements are met, will \fBlsmd\fR run specified plugins as root user: 1. "allow-plugin-root-privilege = true;" in lsmd.conf 2. "require-root-privilege = true;" in plugin config 3. API connection (or lsmcli) has root privileges .SH Plugin OPTIONS .TP \fBrequire-root-privilege = true;\fR Indicates plugin requires root privilege. Without this line or set as \fBfalse\fR, the plugin will never be invoked as root user by \fBlsmd\fR. Please check \fBlsmd.conf\fR option \fBallow-plugin-root-privilege\fR for detail. .SH SEE ALSO \fIlsmd (1)\fR .SH BUGS Please report bugs to .SH AUTHOR Gris Ge libstoragemgmt-1.2.3/doc/man/lsmcli.10000664000175000017500000005326112542455451014363 00000000000000.TH LSMCLI "1" "September 2014" "lsmcli 1.2.3" "libStorageMgmt" .SH NAME lsmcli - libStorageMgmt command line interface .SH SYNOPSIS .B lsmcli \fIcommand\fR [\fIGLOBAL OPTIONS\fR]...[\fICOMMAND OPTIONS\fR]... .SH DESCRIPTION lsmcli is the command line tool for the libStorageMgmt library. This tool allows users to do one off storage related management operations or to script management of their storage. .SH PREREQUISITES .TP 8 * libStorageMgmt daemon. The daemon '\fBlsmd\fR' is required by lsmcli. .TP 8 * URI(Uniform Resource Identifier) URI is used to identify which plugin to use and how the plugin should communicate with the storage array. 
The valid URI format is: .RS 12 .nf \fBplugin://@host:?\fR .br \fBplugin+ssl://@host:?\fR .fi .RE 1 .RS 8 Examples, please refer to "LibStorageMgmt User Guide" for more details: .RE 1 .RS 12 * Simulator: .RS 4 \fBsim://\fR .br \fBsimc://\fR .RE 1 .RS 12 * NetApp ONTAP: .RS 4 \fBontap://username@host\fR .br \fBontap+ssl://username@host\fR .RE 1 .RS 12 * SMI-S supported arrays (eg. EMC CX/VNX, HDS AMS, IBM SVC/DS, LSI MegaRAID and others): .RS 4 \fBsmis://username@host:?namespace=\fR .br \fBsmis+ssl://username@host:?namespace=\fR .RE 1 .RS 8 You can pass URI to lsmcli via one of these methods: .RS 4 * Using '\fB-u\fR, \fB--uri\fR' argument. .br * Using '\fBLSMCLI_URI\fR' environment variable. .br * Add this line into \fB$HOME/.lsmcli\fR: .RS 4 uri=\fI\fR .RE 1 .TP 8 * Password For storage array password authentication you can pass it to lsmcli via one of the following methods: .RS 12 * '\fB-P\fR, \fB--prompt\fR' argument to prompt for password. .br * '\fBLSMCLI_PASSWORD\fR' environment variable. .RE 1 .SH "GLOBAL OPTIONS" .TP 15 \fB--version\fR Show program's version number and exit .TP 15 \fB-h\fR, \fB--help\fR Show this help message and exit. Will show help message of specific command if specified. .TP 15 \fB-u\fR \fI\fR, \fB--uri\fR \fI\fR Uniform Resource Identifier (env LSMCLI_URI) .TP 15 \fB-P\fR, \fB--prompt\fR Prompt for password (env LSMCLI_PASSWORD) .TP 15 \fB-H\fR, \fB--human\fR Print sizes in human readable format (e.g., KiB, MiB, GiB, TiB, PiB, EiB) .TP 15 \fB-t\fR \fI\fR, \fB--terse\fR \fI\fR Print output in terse form with "SEP" as a record separator without header unless '--header' defined. 
.TP 15 \fB--header\fR Include the header with terse .TP 15 \fB-e\fR, \fB--enum\fR Display enumerated types as numbers instead of text .TP 15 \fB-f\fR, \fB--force\fR Bypass confirmation prompt for data loss operations .TP 15 \fB-w\fR \fI\fR, \fB--wait\fR=\fI\fR Command timeout value in ms (default = 30s) .TP 15 \fB-b\fR Run the command asynchronously instead of waiting for completion. The lsmcli command will exit with exit code(7) and job id will be written to STDOUT when a command is still executing on the storage array. Use 'job-status --id \fI\fR' to inquire on the progress of the command. Some arrays or plugins might not support asynchronous operations, in those circumstances, \fB-b\fR will be ineffective. Command will wait until finished. .TP 15 \fB-s\fR, \fB--script\fR Displaying data in script friendly way. .br Without this option, data is displayed in this manner (default): ID | Name | Element Type ... ------------------------------------------ ... aggr0 | aggr0 | FS,SYSTEM_RESERVED,POOL ... iscsi | iscsi | FS,POOL ... With this option, data is displayed in this manner. ----------------------------------------------- ID | aggr0 Name | aggr0 Element Type | FS,SYSTEM_RESERVED,POOL ... ----------------------------------------------- ID | iscsi Name | iscsi Element Type | FS,POOL ... Please note: .br To reduce the width of output, NOT all properties will be displayed in default column display. .SH COMMANDS .SS list List information on LSM objects .TP 15 \fB--type\fR \fI\fR Required. Valid values are (case insensitive): .br \fBVOLUMES\fR, \fBPOOLS\fR, \fBFS\fR, \fBSNAPSHOTS\fR, \fBEXPORTS\fR, \fBNFS_CLIENT_AUTH\fR, .br \fBACCESS_GROUPS\fR, \fBSYSTEMS\fR, \fBDISKS\fR, \fBPLUGINS\fR, \fBTARGET_PORTS\fR. .TP \fB--fs\fR \fI\fR Required for \fB--type\fR=\fBSNAPSHOTS\fR. List the snapshots of certain filesystem. PLUGINS will list all supported plugins of LSM, not only the current one. .TP \fB--sys\fR \fI\fR Search resources from system with SYS_ID. 
Only supported when querying these types of resources: \fBVOLUMES\fR, \fBPOOLS\fR, \fBFS\fR, \fBSNAPSHOTS\fR, \fBDISKS\fR, \fBACCESS_GROUPS\fR. .TP \fB--pool\fR \fI\fR Search resources from pool with POOL_ID. Only supported by these types of resources: \fBVOLUMES\fR, \fBPOOLS\fR, \fBFS\fR. .TP \fB--vol\fR \fI\fR Search resources from volume with VOL_ID. Only supported by these types of resources: \fBVOLUMES\fR, \fBACCESS_GROUPS\fR. .br To query volume masking status, please use this command: lsmcli list --type ACCESS_GROUPS --vol .TP \fB--disk\fR \fI\fR Search resources from disk with DISK_ID. Only supported by these types of resources: \fBDISK\fR. .TP \fB--ag\fR \fI\fR Search resources from access group with AG_ID. Only supported by these types of resources: \fBACCESS_GROUPS\fR, \fBVOLUMES\fR. .br To query volume masking status, please use this command: lsmcli list --type VOLUMES --ag .TP \fB--fs\fR \fI\fR Search resources from file system with FS_ID. Only supported by these types of resources: \fBFS\fR. .TP \fB--nfs-export\fR \fI\fR Search resources from NFS export with NFS_EXPORT_ID. Only supported by these types of resources: \fBEXPORTS\fR. .TP \fB--tgt\fR \fI\fR Search resources from target port with target port ID. Only supported by these types of resources: \fBTARGET_PORTS\fR. .SS job-status Retrieve information about a job. Please see user guide on how to use. .TP 15 \fB--job\fR \fI\fR .SS capabilities Retrieves array capabilities. .TP 15 \fB--sys\fR \fI\fR Required. ID of the system to query for capabilities. .SS plugin-info Retrieves plugin description and version for current URI. .SS volume-create Creates a volume (AKA., logical volume, virtual disk, LUN). .TP 15 \fB--name\fR \fI\fR Required. Volume name. .TP \fB--size\fR \fI\fR Required. Volume size (See \fBSIZE OPTION\fR for allowed formats). .TP \fB--pool\fR \fI\fR Required. ID of pool. .TP \fB--provisioning\fR \fI\fR Optional. Provisioning type. Valid values are: DEFAULT, THIN, FULL. 
\fBDEFAULT\fR means let plugin choose. \fBTHIN\fR means requiring a Thin Provisioning enabled volume. \fBFULL\fR means requiring a fully allocated volume. .SS volume-raid-create Creates a volume on hardware RAID on given disks. .TP 15 \fB--name\fR \fI\fR Required. Volume name. Might be altered or ignored due to hardware RAID card vendor limitation. .TP \fB--raid-type\fR \fI\fR Required. Could be one of these values: \fBRAID0\fR, \fBRAID1\fR, \fBRAID5\fR, \fBRAID6\fR, \fBRAID10\fR, \fBRAID50\fR, \fBRAID60\fR. The supported RAID types of current RAID card could be queried via command "\fBvolume-raid-create-cap\fR". .TP \fB--disk\fR \fI\fR Required. Repeatable. The disk ID for new RAID group. .TP \fB--strip-size\fR \fI\fR Optional. The size in bytes of strip on each disks. If not defined, will let hardware card to use the vendor default value. The supported stripe size of current RAID card could be queried via command "\fBvolume-raid-create-cap\fR". .SS volume-raid-create-cap Query support status of volume-raid-create command for current hardware RAID card. .TP 15 \fB--sys\fR \fI\fR Required. ID of the system to query for capabilities. .SS volume-delete .TP 15 Delete a volume given its ID .TP \fB--vol\fR \fI\fR Required. The ID of volume to delete. .SS volume-resize .TP 15 Re-sizes a volume, requires: .TP \fB--vol\fR \fI\fR Required. The ID of volume to resize. .TP \fB--size\fR \fI\fR Required. The new size of volume.(See \fBSIZE OPTION\fR for allowed formats). Due to boundary alignment concern, array might return a volume with slightly bigger size than requested. .SS volume-replicate Creates a new volume and replicates provided volume to it. .TP 15 \fB--vol\fR \fI\fR Required. The ID of volume to replicate. .TP \fB--name\fR \fI\fR Required. The name for new volume to hold replicated data. .TP \fB--rep-type\fR \fI\fR (see \fBVOLUME REPLICATION TYPES)\fR Required. Valid types of replication are: .br \fBCLONE\fR, \fBCOPY\fR, \fBMIRROR_ASYNC\fR, \fBMIRROR_SYNC\fR. 
.TP \fB--pool\fR \fI\fR Optional. The ID of pool where the new volume should be created from. If not specified, plugin or array will choose appropriate pool. .SS volume-replicate-range Replicates a portion of a volume to the same volume or to a different volume. .TP 15 \fB--src-vol\fR \fI\fR Required. The ID of replication source volume. .TP \fB--dst-vol\fR \fI\fR Required. The ID of replication destination volume. .TP \fB--rep-type\fR \fI\fR (see \fBVOLUME REPLICATION TYPES)\fR Required. Appropriate types of replication are: .br \fBCLONE\fR, \fBCOPY\fR. .TP \fB--src-start\fR \fI\fR Required. Replication source volume start block number. Must be paired with \fB--count\fR and \fB--dst-start\fR. If you have several non-continuous block ranges, you can repeatedly define this argument, like '\fB--src-start 0 --dst-start 0 --count 1024 --src-start 2048 --dst-start 2048 --count 2048\fR' .TP \fB--dst-start\fR \fI\fR Required. Replication destination volume start block number. Must be paired with \fB--count\fR and \fB--src-start\fR. .TP \fB--count\fR \fI\fR Required. The count of replicated block starting from \fB--src-start\fR block. Must be paired with \fB--src-start\fR and \fB--dst-start\fR. .SS volume-replicate-range-block-size Size of each replicated block on a system in bytes. .TP 15 \fB--sys\fR \fI\fR Required. ID of the system to query for replicated block size. .SS volume-dependants Returns True if volume has a dependant child, like replication. .TP 15 \fB--vol\fR \fI\fR Required. The ID of volume to query dependency. .SS volume-dependants-rm Removes volume dependencies(like replication). .TP 15 \fB--vol\fR \fI\fR Required. The ID of volume to remove dependency. .SS volume-access-group Lists the access group(s) that have access to the provided volume. .TP 15 \fB--vol\fR \fI\fR Required. The ID of volume to query access. .SS volume-mask .TP 15 Grant access group RW access to certain volume. Like LUN masking or NFS export. .TP \fB--vol\fR \fI\fR Required. 
The ID of volume to access. .TP \fB--ag\fR \fI\fR Required. The ID of access group to grant. .SS volume-unmask .TP 15 Revoke access group RW access to specified volume. .TP \fB--vol\fR \fI\fR Required. The ID of volume to revoke. .TP \fB--ag\fR \fI\fR Required. The ID of access group to revoke. .SS volume-raid-info .TP 15 Query RAID information for given volume. .TP \fB--vol\fR \fI\fR Required. The ID of volume to query. .SS pool-member-info .TP 15 Query RAID information for given pool. .TP \fB--pool\fR \fI\fR Required. The ID of pool to query. .SS access-group-create .TP 15 Create an access group. .TP \fB--name\fR \fI\fR Required. The human friendly name for new access group. .TP \fB--init\fR \fI\fR Required. The first initiator ID of new access group. WWPN or iSCSI IQN. .TP \fB--sys\fR \fI\fR Required. The ID of system where this access group should reside on. .SS access-group-add Adds an initiator to an access group. .TP 15 \fB--ag\fR \fI\fR Required. ID of access group. .TP \fB--init\fR \fI\fR Required. ID of initiator to add. WWPN or iSCSI IQN. .SS access-group-remove Removes an initiator from an access group. .TP 15 \fB--ag\fR \fI\fR Required. ID of access group. .TP \fB--init\fR \fI\fR Required. ID of initiator to remove. .SS access-group-delete Delete an access group. .TP 15 \fB--ag\fR \fI\fR Required. ID of access group to delete. .SS access-group-volumes Lists the volumes that the access group has been granted access to. .TP 15 \fB--ag\fR \fI\fR Required. The ID of access group to query. .SS iscsi-chap Configures ISCSI inbound/outbound CHAP authentication. .TP 15 \fB--init\fR \fI\fR Required. The ID of iSCSI initiator to configure. .TP \fB--in-user\fR \fI\fR Optional. Inbound CHAP user name. .TP \fB--in-pass\fR \fI\fR Optional. Inbound CHAP password. .TP \fB--out-user\fR \fI\fR Optional. Outbound CHAP user name. .TP \fB--out-pass\fR \fI\fR Optional. Outbound CHAP password. .SS fs-create Creates a filesystem. .TP 15 \fB--name\fR \fI\fR Required. 
Human friendly name for new filesystem. .TP \fB--size\fR \fI\fR Required. Volume size(See \fBSIZE OPTION\fR for allowed formats). .TP \fB--pool\fR \fI\fR Required. ID of pool to hold the new filesystem. .SS fs-delete Delete a filesystem. .TP 15 \fB--fs\fR \fI\fR Required. ID of the filesystem to delete. .SS fs-resize Resizes a filesystem. .TP 15 \fB--fs\fR \fI\fR Required. ID of the filesystem to resize. .TP \fB--size\fR \fI\fR Required. New size of filesystem. See \fBSIZE OPTION\fR for allowed formats. .SS fs-export Export a filesystem via NFS. .TP 15 \fB--fs\fR \fI\fR Required. ID of the filesystem to export. .TP \fB--exportpath\fR \fI\fR Optional. NFS server export path. e.g. '/foo/bar'. .TP \fB--anonuid\fR \fI\fR Optional. The UID(User ID) to map to anonymous user. .TP \fB--anongid\fR \fI\fR Optional. The GID(Group ID) to map to anonymous user. .TP \fB--auth-type\fR \fI\fR Optional. NFS client authentication type. This is just a place holder, not supported yet. .TP \fB--root-host\fR \fI\fR Optional. Repeatable. The host/IP has root access. For two or more hosts/IPs: '\fB--root-host hostA --root-host hostB\fR'. .TP \fB--ro-host\fR \fI\fR Optional. Repeatable. The host/IP has read only access. For two or more hosts/IPs: '\fB--ro-host hostA --ro-host hostB\fR'. .TP \fB--rw-host\fR \fI\fR Optional. The host/IP has read/write access. For two or more hosts/IPs: '\fB--rw-host hostA --rw-host hostB\fR'. .SS fs-unexport Remove an NFS export. .TP 15 \fB--fs\fR \fI\fR Required. ID of the filesystem to unexport. .SS fs-clone Creates a file system clone. The 'clone' means point in time read writeable space efficient copy of data, AKA. read-writable snapshot. .TP 15 \fB--src-fs\fR \fI\fR Required. The ID of the filesystem to clone. .TP \fB--dst-name\fR \fI\fR Required. The name for newly created destination file system. .TP \fB--backing-snapshot\fR \fI\fR Optional. Make a FS clone using a previously created snapshot. 
.SS fs-snap-create Creates a snapshot of specified filesystem. A snapshot is defined as a read only space efficient point in time copy (PIT) of a filesystem. The source filesystem remains modifiable. .TP 15 \fB--name\fR \fI\fR Required. The human friendly name of new snapshot. .TP \fB--fs\fR \fI\fR Required. The ID of filesystem to create snapshot against. .SS fs-snap-delete Deletes a snapshot. .TP 15 \fB--snap\fR \fI\fR Required. The ID of snapshot to delete. .TP \fB--fs\fR \fI\fR Required. The ID of filesystem. .SS fs-snap-restore Restores a FS or specified files to previous snapshot state. This will discard all the changes to filesystem since snapshot if specific files are not specified in restore. .TP 15 \fB--fs\fR \fI\fR Required. The ID of filesystem to restore. .TP \fB--snap\fR \fI\fR Required. The ID of snapshot to restore. .TP \fB--file\fR \fI\fR Optional. Repeatable. With this option defined, will only restore the defined file(s). '\fB--file fileA --file pathB\fR'. .TP \fB--fileas\fR \fI\fR Optional. Repeatable. With this option defined, the restored file will be saved to specified path and filename, eg. '\fB--file fileA --fileas old_fileA \fR'. .SS fs-dependants Returns True if a child dependency (snapshot or clone) exists. .TP 15 \fB--fs\fR \fI\fR Required. The ID of filesystem to query. .TP \fB--file\fR \fI\fR Optional. Repeatable. Only check for dependencies on specific file(s), eg. '\fB--file fileA --file pathB\fR'. .SS fs-dependants-rm Removes filesystem dependencies(snapshot or clone). .TP 15 \fB--fs\fR \fI\fR Required. The ID of filesystem to remove dependency. .TP \fB--file\fR \fI\fR Optional. Repeatable. Only remove dependencies on specific file(s), eg. '\fB--file fileA --file pathB\fR'. .SS file-clone Creates a clone of a file (thin provisioned). Note: --src and --dst need to be paired eg. '\fB--src fileA --src fileB --dst fileA_clone --dst fileB_clone\fR'. .TP 15 \fB--src\fR \fI\fR Required. Repeatable. Source file to clone (relative path). 
.TP \fB--dst\fR \fI\fR Required. Repeatable. Destination file for clone (relative path). .IP .SH ALIAS .SS ls Alias of 'list --type systems' .SS lp Alias of 'list --type pools' .SS lv Alias of 'list --type volumes' .SS ld Alias of 'list --type disks' .SS la Alias of 'list --type access_groups' .SS lf Alias of 'list --type fs' .SS lt Alias of 'list --type target_ports' .SS c Alias of 'capabilities' .SS p Alias of 'plugin-info' .SS vc Alias of 'volume-create' .SS vrc Alias of 'volume-raid-create' .SS vrcc Alias of 'volume-raid-create-cap' .SS vd Alias of 'volume-delete' .SS vr Alias of 'volume-resize' .SS vm Alias of 'volume-mask' .SS vu Alias of 'volume-unmask' .SS vri Alias of 'volume-raid-info' .SS pmi Alias of 'pool-member-info' .SS ac Alias of 'access-group-create' .SS aa Alias of 'access-group-add' .SS ar Alias of 'access-group-remove' .SS ad Alias of 'access-group-delete' .IP .SH SIZE OPTION .SS \fB--size\fR \fI\fI\fR\fR Storage space size. Format is '\fI\fR' + '\fI\fR'. Example: "10GiB", "20.5MB". No postfix indicates bytes. Valid prefixes are: KiB, # 2^10 Bytes MiB, # 2^20 Bytes GiB, # 2^30 Bytes TiB, # 2^40 Bytes PiB, # 2^50 Bytes EiB, # 2^60 Bytes KB, # 10^3 Bytes MB, # 10^6 Bytes GB, # 10^9 Bytes TB, # 10^12 Bytes PB, # 10^15 Bytes EB, # 10^17 Bytes These prefixes are supported also, but not recommended: K, M, G, T, P, E, # equal to KiB, MiB, and etc k, m, g, t, p, e, # equal to KiB, MiB, and etc .SH FILES .TP 15 ~/.lsmcli lsmcli configuration file, containing name-value pairs separated by '='. The only currently supported configuration option is 'uri', such as 'uri=ontap://user@filer.example.com'. Configuration options in .lsmcli are only used if not overridden by command-line option or environment variable. 
.SH EXAMPLES (command output omitted for brevity) .TP 15 Simulator, list pools (no password required) $ lsmcli -u sim:// -l POOLS .TP 15 NetApp, list volumes (prompting for password) $ lsmcli -u ontap://root@host/ -l VOLUMES -P .TP 15 SMI-S, list systems (prompting for password) .nf $ lsmcli -u smispy://username@host:5988/?namespace=root/interop \\ -l SYSTEMS -P .fi .TP 15 Targetd, list pools (using env variables for URI and password) $ export LSMCLI_URI=targetd://username@host:18700 .br $ export LSMCLI_PASSWORD=\fI\fR .br $ lsmcli -l POOLS .TP 15 NexentaStor, create volume (using environment variables for URI and password) .nf $ export LSMCLI_URI='nstor://user@host' $ export LSMCLI_PASSWORD=\fI\fR $ lsmcli volume-create --name volume_name --size 1TiB --pool default .fi .TP 15 SMI-S, create volume (using environment variables for URI and password) .nf $ export LSMCLI_URI='smispy+ssl://user@host:5989?namespace=root/emc' $ export LSMCLI_PASSWORD=\fI\fR $ lsmcli volume-create --name volume_name --size 1TiB --pool default .fi .SH ENVIRONMENT .TP 17 LSMCLI_URI The URI for the storage array in question. .TP 17 LSMCLI_PASSWORD The password to use for the array. .SH VOLUME REPLICATION TYPES .TP 17 CLONE A point in time, read writeable, space efficient copy of data. .TP 17 COPY A full bitwise copy of the data. It occupies the full space. .TP 17 MIRROR_SYNC Continuously updated, realtime with both copies having identical data. .TP 17 MIRROR_ASYNC Continuously updated, with a varying amount of delay and data delta between the source and target. .SH NOTES .TP 8 Plugin installation Plugins are installed individually except for the simulators which are always included. .TP 8 Secure sockets layer (SSL) All of the plugins (except the simulator) support SSL when communicating from the plugin to the array. This is accomplished by adding "+ssl" to the plugin and usually by selecting a different port number from non-SSL communications. 
.br .nf $ lsmcli -u smispy+ssl://username@host:5989/?namespace=interop \\ list --type SYSTEMS -P .fi .TP 8 SSL error: certificate verify failed When using SMI-S plugin with SSL against self-signed SMI-S provider, lsmcli normally quits with '\fBSSL error: certificate verify failed\fR'. Please contact SMI-S provider support to set up the self-signed certificate in your system. If you prefer to bypass the certificate check, add 'no_ssl_verify=yes' at the end of URI, for example: .RS 12 .nf \fBsmispy+ssl://admin@emc-smi:5989?namespace=root/emc&no_ssl_verify=yes\fR .fi .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Tony Asleson \fI\fR .br Gris Ge \fI\fR libstoragemgmt-1.2.3/doc/man/targetd_lsmplugin.10000664000175000017500000000326712542455451016625 00000000000000.TH targetd_lsmplugin "1" "June 2015" "targetd_lsmplugin 1.2.3" "libStorageMgmt" .SH NAME targetd_lsmplugin -- libStorageMgmt targetd plugin .SH DESCRIPTION LibStorageMgmt targetd plugin allows user to manage storage using the targetd storage API [1]. The 'targetd_lsmplugin' executable file is for libStorageMgmt daemon to execute when client user specifies targetd plugin in the URI. .SH URI To use this plugin, users should set their URI to this format: .nf # HTTP connection \fBtargetd://@:\fR # HTTPS connection \fBtargetd+ssl://@:\fR .fi .TP \fBusername\fR The \fBusername\fR is the user account configured in targetd configuration file. .TP \fBtargetd_server\fR The \fBtargetd_server\fR is the IP address or DNS name of server running targetd daemon. .TP \fBport number\fR The \fBport number\fR is the listening port of the targetd daemon. The default port of 18700 is used if none is supplied on the URI. .TP \fBURI parameters\fR No additional URI parameters are supported by this plugin. .SH SUPPORTED SOFTWARE Linux targetd 0.7.1 or later version. 
Detailed support status can be queried via: * \fBlsm.Client.capabilities()\fR (Python API) * \fBlsm_capabilities()\fR (C API) * \fBlsmcli capabilities\fR (lsmcli command line). .SH FIREWALL RULES By default, this plugin requires access to the targetd server's TCP 18700 port. .SH SEE ALSO \fBlsmcli\fR(1), \fBlsmd\fR(1), [1] https://github.com/agrover/targetd .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR .br Tony Asleson \fI\fR libstoragemgmt-1.2.3/doc/man/sim_lsmplugin.1.in0000664000175000017500000000245512537546123016367 00000000000000.TH sim_lsmplugin "1" "June 2015" "sim_lsmplugin @VERSION@" "libStorageMgmt" .SH NAME sim_lsmplugin -- LibStorageMgmt simulator python plugin .SH DESCRIPTION LibstorageMgmt simulator plugin is for development or test use only. The plugin simulates an array which supports most features of the library. State is stored in a file, default is '/tmp/lsm_sim_data'. The 'sim_lsmplugin' executable file is for libStorageMgmt daemon to execute when client user specifies sim plugin in the URI. This plugin is written in pure Python and is intended to be an example of a python plugin for the library. .SH URI To use this plugin, users should set their URI to this format: .nf # All that is required sim:// # Optional statefile sim://?statefile= .fi No password is required for this plugin. .TP \fBURI parameters\fR .RS 7 .TP \fBstatefile\fR Use specified file to store simulator state data. Example URI: .nf \fBsim://?statefile=/tmp/other_lsm_sim_data\fR .fi The statefile is a sqlite3 database file. .SH FIREWALL RULES This plugin requires no network access. 
.SH SEE ALSO \fBlsmcli\fR(1), \fBlsmd\fR(1) .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR .br Tony Asleson \fI\fR libstoragemgmt-1.2.3/doc/man/ontap_lsmplugin.1.in0000664000175000017500000000326212537546123016715 00000000000000.TH ontap_lsmplugin "1" "June 2015" "ontap_lsmplugin @VERSION@" "libStorageMgmt" .SH NAME ontap_lsmplugin -- LibstorageMgmt ONTAP Plugin .SH DESCRIPTION LibstorageMgmt ontap plugin allows user to manage NetApp ONTAP storage arrays. The 'ontap_lsmplugin' executable file is for libStorageMgmt daemon to execute when client specifies ontap plugin in the URI. This plugin requires NetApp ONTAP storage array to enable these options: \fBoptions httpd.enable on\fR \fBoptions httpd.admin.enable on\fR This option is required for HTTPS connections: \fBoptions httpd.admin.ssl.enable on\fR .SH URI To use this plugin, users should set their URI to this format: .nf # HTTP connection \fBontap://@\fR # HTTPS connection \fBontap+ssl://@\fR .fi .TP \fBusername\fR The \fBusername\fR is the user account with administration privilege. .TP \fBontap_filer_ip\fR The \fBontap_filer_ip\fR is the NetApp ONTAP filer IP address or DNS name. .TP \fBURI parameters\fR No additional URI parameters are supported by this plugin. .SH Supported Hardware NetApp ONTAP 8.x is supported. Detailed support status can be queried via: * \fBlsm.Client.capabilities()\fR (Python API) * \fBlsm_capabilities()\fR (C API) * \fBlsmcli capabilities\fR (lsmcli command line). .SH FIREWALL RULES This plugin requires the access to the NetApp ONTAP Filer's TCP 80 port for HTTP connection and TCP 443 port for HTTPS connection. 
.SH SEE ALSO \fBlsmcli\fR(1), \fBlsmd\fR(1) .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR .br Tony Asleson \fI\fR libstoragemgmt-1.2.3/doc/man/hpsa_lsmplugin.10000664000175000017500000000313112542455451016114 00000000000000.TH hpsa_lsmplugin "1" "March 2015" "hpsa_lsmplugin 1.2.3" "libStorageMgmt" .SH NAME hpsa_lsmplugin -- LibstorageMgmt HP SmartArray plugin .SH DESCRIPTION LibstorageMgmt hpsa plugin allows user to manage HP SmartArray via vendor tool \fBhpssacli\fR[1]. The 'hpsa_lsmplugin' executable file is for libStorageMgmt daemon to execute when client user specifies hpsa plugin in the URI. .SH URI To use this plugin, users should set their URI to this format: .nf \fBhpsa://\fR or \fBhpsa://?hpssacli=\fR .fi .TP \fBURI parameters\fR These URI parameters are supported by this plugin: .RS 7 .TP \fBhpssacli\fR The 'hpssacli' URI parameter is used to specify the path of hpssacli tool. By default, this plugin will try these paths used by hpssacli rpm: \fB/usr/sbin/hpssacli\fR and \fB/opt/hp/hpssacli/bld/hpssacli\fR. .SH ROOT PRIVILEGE This plugin requires both \fBlsmd\fR daemon and API client running as root user. Please check manpage \fIlsmd.conf (5)\fR for detail. .SH SUPPORTED HARDWARE Please refer to HP website for hardware support status of hpssacli. Detailed support status can be queried via: * \fBlsm.Client.capabilities()\fR (Python API) * \fBlsm_capabilities()\fR (C API) * \fBlsmcli capabilities\fR (lsmcli command line). .SH FIREWALL RULES This plugin only executes \fBhpssacli\fR on localhost. No network connection required. .SH SEE ALSO \fIlsmcli\fR(1), \fIlsmd\fR(1), [1]http://downloads.linux.hp.com/SDR/project/spp/ .SH BUGS Please report bugs to \fI\fR .SH AUTHOR Gris Ge \fI\fR libstoragemgmt-1.2.3/doc/REST_API_Doc0000664000175000017500000000216612537546123014220 00000000000000RESTful API for LibStorageMgmt(LSM) Version 0.1 Currently, lsm_restd is still in initial stage. 
You could try: $ cd libstoragemgmt-git/src $ make lsm_restd $ ./lsm_restd # Open another shell $ curl http://localhost:8888/v0.1/systems?uri=sim: $ curl http://localhost:8888/v0.1/pools?uri=sim: $ curl http://localhost:8888/v0.1/volumes?uri=sim: $ curl http://localhost:8888/v0.1/disks?uri=sim: ################### Plan ####################### # 1. HTTP Methods GET List certain resource PUT Replace existing resource POST Create new resource DELETE Delete existing resource OPTIONS Response the support HTTP methods and document URL for certain URI # 2. Resources Resouce Method URI System GET, OPTIONS /v0.1/systems Pool GET, PUT, POST, DELETE, OPTIONS /v0.1/pools Volume GET, PUT, POST, DELETE, OPTIONS /v0.1/volumes Please refer to [[DataStucture]] # 3. Links ## 3.1 System ### 3.1.1 GET /v0.1/system Query all the storage systems ### 3.1.2 OPTIONS /v0.1/system ## 3.2 Pool libstoragemgmt-1.2.3/configure.ac0000664000175000017500000002564512542455432013770 00000000000000dnl Process this file with autoconf to produce a configure script. dnl Copyright (C) 2011 Red Hat, Inc. dnl See COPYING.LIB for the License of this software AC_INIT( [libstoragemgmt], [1.2.3], [libstoragemgmt-devel@lists.fedorahosted.org], [], [https://github.com/libstorage/libstoragemgmt/]) AC_CONFIG_SRCDIR([configure.ac]) AC_CONFIG_AUX_DIR([build-aux]) AC_CONFIG_HEADERS([config.h]) AC_CONFIG_MACRO_DIR([m4]) dnl Make automake keep quiet about wildcards & other GNUmake-isms AM_INIT_AUTOMAKE([-Wno-portability subdir-objects]) AM_MAINTAINER_MODE([enable]) # Enable silent build when available (Automake 1.11) m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES([yes])]) PKG_PROG_PKG_CONFIG AC_CANONICAL_HOST LIBSM_MAJOR_VERSION=`echo $VERSION | awk -F. '{print $1}'` LIBSM_MINOR_VERSION=`echo $VERSION | awk -F. '{print $2}'` LIBSM_MICRO_VERSION=`echo $VERSION | awk -F. 
'{print $3}'` LIBSM_VERSION=$LIBSM_MAJOR_VERSION.$LIBSM_MINOR_VERSION.$LIBSM_MICRO_VERSION$LIBSM_MICRO_VERSION_SUFFIX LIBSM_VERSION_INFO=`expr $LIBSM_MAJOR_VERSION + $LIBSM_MINOR_VERSION`:$LIBSM_MICRO_VERSION:$LIBSM_MINOR_VERSION LIBSM_VERSION_NUMBER=`expr $LIBSM_MAJOR_VERSION \* 1000000 + $LIBSM_MINOR_VERSION \* 1000 + $LIBSM_MICRO_VERSION` # Our intention is that we will always be backward compatible. Thus we will # set the library version in such a way so that we will always be # libstoragemgmt.so.1.n.n once we officially release our ver 1.0.0. # # To make this happen we will use the minor version as the libtool current and # age set to minor - 1 and the micro used for revision. Basically this will get # us what we expect while utilizing the libtool revision system. # # For this to work we need to make sure that when we add to the interface we # increment minor and set micro to 0. If we make a code change which doesn't # change the API we can just bump micro. # # 0.1.0 -> libstoragemgmt.so.0.1.0 # 1.0.0 -> libstoragemgmt.so.1.0.0 # 1.1.0 -> libstoragemgmt.so.1.1.0 # 1.1.1 -> libstoragemgmt.so.1.1.1 CURRENT=`expr $LIBSM_MAJOR_VERSION '*' 1 + $LIBSM_MINOR_VERSION` AGE=$LIBSM_MINOR_VERSION REVISION=$LIBSM_MICRO_VERSION LIBSM_LIBTOOL_VERSION=$CURRENT:$REVISION:$AGE AC_SUBST([LIBSM_MAJOR_VERSION]) AC_SUBST([LIBSM_MINOR_VERSION]) AC_SUBST([LIBSM_MICRO_VERSION]) AC_SUBST([LIBSM_VERSION]) AC_SUBST([LIBSM_VERSION_INFO]) AC_SUBST([LIBSM_VERSION_NUMBER]) AC_SUBST([LIBSM_LIBTOOL_VERSION]) dnl Required minimum versions of all libs we depend on LIBXML_REQUIRED="2.5.0" dnl Checks for C compiler. 
AC_PROG_CC AC_PROG_CXX AC_PROG_AWK AC_PROG_INSTALL AC_PROG_LN_S AC_PROG_MAKE_SET AC_PROG_CPP AM_PROG_CC_STDC AM_PROG_LIBTOOL AM_PROG_CC_C_O AM_PROG_LD AC_CHECK_HEADERS([stdint.h stdlib.h string.h sys/socket.h syslog.h unistd.h]) AC_LANG_PUSH([C++]) AC_COMPILE_IFELSE([AC_LANG_SOURCE([int i;])], [], [AC_MSG_ERROR([C++ compiler missing or inoperational])]) AC_LANG_POP([C++]) #Make sure all types are covered AC_HEADER_STDBOOL AC_TYPE_INT32_T AC_TYPE_INT64_T AC_TYPE_SIZE_T AC_TYPE_SSIZE_T AC_TYPE_UINT32_T AC_TYPE_UINT64_T AC_TYPE_UINT8_T AC_FUNC_ERROR_AT_LINE AC_FUNC_MALLOC AC_FUNC_REALLOC AC_CHECK_FUNCS([getpass memset socket strchr strdup strtol strtoul]) dnl Availability of various common headers (non-fatal if missing). AC_CHECK_HEADERS([dlfcn.h]) #Check for openssl development libs, we are using in simc_lsmplugin AC_CHECK_LIB([crypto], [MD5_Final], [SSL_LIBS=-lcrypto], AC_MSG_ERROR([Missing openssl-devel libraries])) AC_SUBST([SSL_LIBS]) #Check for json parser yajl AC_CHECK_HEADERS([yajl/yajl_gen.h yajl/yajl_parse.h], [] , AC_MSG_ERROR([Missing yajl development headers]) ) AC_CHECK_HEADERS([yajl/yajl_version.h]) AC_CHECK_LIB([yajl], [yajl_parse], [YAJL_LIBS=-lyajl], AC_MSG_ERROR([Missing yajl library])) AC_SUBST([YAJL_LIBS]) dnl if --prefix is /usr, don't use /usr/var for localstatedir dnl or /usr/etc for sysconfdir dnl as this makes a lot of things break in testing situations if test "$prefix" = "/usr" && test "$localstatedir" = '${prefix}/var' ; then localstatedir='/var' fi if test "$prefix" = "/usr" && test "$sysconfdir" = '${prefix}/etc' ; then sysconfdir='/etc' fi dnl ========================================================================== dnl find libxml2 library, borrowed from xmlsec dnl ========================================================================== PKG_CHECK_MODULES([LIBXML], [libxml-2.0]) PKG_CHECK_MODULES([LIBGLIB], [glib-2.0 >= 2.22.5]) want_c_unit="yes" AC_ARG_ENABLE([build-c-unit], [AC_HELP_STRING([--disable-build-c-unit], [disable 
building C unit test case.])], [want_c_unit=${enableval}], []) if test "x${want_c_unit}" = "xyes"; then PKG_CHECK_MODULES([LIBCHECK], [check >= 0.9.8 ]) fi AM_CONDITIONAL([BUILD_C_UNIT], [test "x${want_c_unit}" = "xyes"]) dnl ========================================================================= dnl Check for perl, used for code constants checking dnl ========================================================================= AC_PATH_PROGS(PERL, perl) dnl ========================================================================== dnl Check for python2 as it is needed for the base cmd line function dnl ========================================================================== AC_PATH_PROGS(PYTHON, python2.7 python2.6 python, [Python is required]) AC_MSG_CHECKING([Check for Python major version]) PYTHON_MAJOR_VERSION=`$PYTHON -c "import sys; print(sys.version_info[[0]])"` case "$PYTHON_MAJOR_VERSION" in 2) ;; *) AC_MSG_ERROR( [we need Python version 2.x but found $PYTHON_MAJOR_VERSION.x]) ;; esac AC_MSG_RESULT([$PYTHON_MAJOR_VERSION]) AM_PATH_PYTHON([2.6], [], AC_MSG_ERROR([Python interpreter 2.6 or 2.7 required]) ) AC_PYTHON_MODULE([pywbem], [Required]) AC_PYTHON_MODULE([M2Crypto], [Required]) AC_PYTHON_MODULE([argparse], [Required]) dnl ========================================================================== dnl Check for libmicrohttpd and json-c as it is needed for REST API daemon dnl ========================================================================== AC_ARG_WITH([rest-api], [AS_HELP_STRING([--without-rest-api], [Do not build the REST API daemon])], [], [with_rest_api=yes]) if test "x$with_rest_api" = "xyes"; then PKG_CHECK_MODULES([LIBMICROHTTPD], [libmicrohttpd >= 0.9]) PKG_CHECK_MODULES([JSON], [json >= 0.10], [], [ PKG_CHECK_MODULES([JSON], [json-c], [], [ AC_MSG_ERROR([json-c development libraries 0.10 or later required]) ]) ]) fi AM_CONDITIONAL([WITH_REST_API], [test "x$with_rest_api" = "xyes"]) dnl 
========================================================================== dnl Add option '--without-megaraid' to exclude megaraid plugin. dnl ========================================================================== AC_ARG_WITH([megaraid], [AS_HELP_STRING([--without-megaraid], [Do not build the MegaRAID plugin])], [], [with_megaraid=yes]) AM_CONDITIONAL([WITH_MEGARAID], [test "x$with_megaraid" = "xyes"]) dnl ========================================================================== dnl Add option '--without-hpsa' to exclude hpsa plugin. dnl ========================================================================== AC_ARG_WITH([hpsa], [AS_HELP_STRING([--without-hpsa], [Do not build the HP SmartArray plugin])], [], [with_hpsa=yes]) AM_CONDITIONAL([WITH_HPSA], [test "x$with_hpsa" = "xyes"]) dnl ========================================================================== dnl Check for libconfig as it is needed for lsmd daemon dnl ========================================================================== PKG_CHECK_MODULES( [LIBCONFIG], [libconfig >= 1.3.2],, AC_MSG_ERROR([libconfig 1.3.2 or newer not found.]) ) dnl ========================================================================== dnl Add option '--without-bash-completion' to exclude bash completion script. 
dnl ========================================================================== AC_ARG_WITH([bash-completion], [AS_HELP_STRING([--without-bash-completion], [Do not install the bash auto-completion script])], [], [with_bash_completion=yes]) AM_CONDITIONAL( [WITH_BASH_COMPLETION], [test "x$with_bash_completion" = "xyes"]) dnl ========================================================================== dnl Add option '--with-bash-completion-dir' to specific bash completion dir, dnl if not defined, if pkg-config file for bash-completion found, use its dnl 'completionsdir', else use /etc/bash_completion.d dnl ========================================================================== AC_ARG_WITH([bash-completion-dir], AS_HELP_STRING( [--with-bash-completion-dir=DIR], [Bash completions directory]), [], [AS_IF( [$($PKG_CONFIG --exists bash-completion)], [with_bash_completion_dir=$( $PKG_CONFIG --variable=completionsdir bash-completion)], # EPEL 6 is still shipping bash-completion version 1.x # which does not provide pkg-config support. 
# So, for EPEL 6 or anyone not installed bash-compeltion, we # use fallback folder '/etc/bash_completion.d' [with_bash_completion_dir=$sysconfdir/bash_completion.d])]) AC_SUBST([bashcompletiondir], [$with_bash_completion_dir]) #Setup the unit directory for systemd stuff PKG_PROG_PKG_CONFIG AC_ARG_WITH([systemdsystemunitdir], AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [Directory for systemd service files]), [], [with_systemdsystemunitdir=$($PKG_CONFIG --variable=systemdsystemunitdir systemd)]) if test "x$with_systemdsystemunitdir" != xno; then AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir]) fi AM_CONDITIONAL(HAVE_SYSTEMD, [test -n "$with_systemdsystemunitdir" -a "x$with_systemdsystemunitdir" != xno ]) AC_OUTPUT(libstoragemgmt.pc \ Makefile \ c_binding/Makefile \ c_binding/include/Makefile \ c_binding/include/libstoragemgmt/Makefile \ c_binding/include/libstoragemgmt/libstoragemgmt_version.h \ python_binding/Makefile \ python_binding/lsm/version.py \ plugin/Makefile \ plugin/simc/Makefile \ plugin/megaraid/Makefile \ plugin/hpsa/Makefile \ daemon/Makefile \ config/Makefile \ doc/Makefile \ doc/man/lsmcli.1 \ doc/man/lsmd.1 \ doc/man/sim_lsmplugin.1 \ doc/man/simc_lsmplugin.1 \ doc/man/smispy_lsmplugin.1 \ doc/man/ontap_lsmplugin.1 \ doc/man/targetd_lsmplugin.1 \ doc/man/nstor_lsmplugin.1 \ doc/doxygen.conf \ doc/man/lsmd.conf.5 \ doc/man/megaraid_lsmplugin.1 \ doc/man/hpsa_lsmplugin.1 \ tools/Makefile \ tools/udev/Makefile \ tools/lsmcli/Makefile \ tools/utility/Makefile \ tools/bash_completion/Makefile \ packaging/Makefile \ packaging/daemon/Makefile \ packaging/libstoragemgmt.spec \ doc/man/Makefile \ test/Makefile) libstoragemgmt-1.2.3/AUTHORS0000664000175000017500000000207212537546123012541 00000000000000 libStorageMgmt Authors The libStorageMgmt project was initiated by: Tony Asleson The primary maintainer(s) and people with commit access rights: Tony Asleson Andy Grover Gris Ge Ma Shimiao Patches have also been contributed by: Daniel 
Veillard Christophe Fergeau Eduardo Lima Dustin Schoenbrun Ritesh Raj Sarraf Andy Grover Anatoly Legkodymov Deepak C Shetty Gris Ge Ma Shimiao Ewan Milne Charles Rose [....send patches to get your name here....] libstoragemgmt-1.2.3/Makefile.in0000664000175000017500000007232612542455445013551 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @BUILD_C_UNIT_TRUE@am__append_1 = test subdir = . 
DIST_COMMON = INSTALL NEWS README AUTHORS ChangeLog \ $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(top_srcdir)/configure $(am__configure_deps) \ $(srcdir)/config.h.in $(srcdir)/libstoragemgmt.pc.in \ $(top_srcdir)/python_binding/lsm/version.py.in COPYING.LIB \ build-aux/compile build-aux/config.guess build-aux/config.sub \ build-aux/depcomp build-aux/install-sh build-aux/missing \ build-aux/py-compile build-aux/ltmain.sh \ $(top_srcdir)/build-aux/compile \ $(top_srcdir)/build-aux/config.guess \ $(top_srcdir)/build-aux/config.sub \ $(top_srcdir)/build-aux/install-sh \ $(top_srcdir)/build-aux/ltmain.sh \ $(top_srcdir)/build-aux/missing ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ configure.lineno config.status.lineno mkinstalldirs = $(install_sh) -d CONFIG_HEADER = config.h CONFIG_CLEAN_FILES = libstoragemgmt.pc python_binding/lsm/version.py CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ 
tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(pkgconfigdir)" DATA = $(pkgconfig_DATA) RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ cscope distdir dist dist-all distcheck am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) \ $(LISP)config.h.in # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. 
am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags CSCOPE = cscope DIST_SUBDIRS = c_binding python_binding plugin doc tools daemon \ packaging config test DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) distdir = $(PACKAGE)-$(VERSION) top_distdir = $(distdir) am__remove_distdir = \ if test -d "$(distdir)"; then \ find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ && rm -rf "$(distdir)" \ || { sleep 5 && rm -rf "$(distdir)"; }; \ else :; fi am__post_remove_distdir = $(am__remove_distdir) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" DIST_ARCHIVES = $(distdir).tar.gz GZIP_ENV = --best DIST_TARGETS = dist-gzip distuninstallcheck_listfiles = find . -type f -print am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' distcleancheck_listfiles = find . 
-type f -print ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = 
@PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = 
@top_srcdir@ ACLOCAL_AMFLAGS = -I m4 DISTCHECK_CONFIGURE_FLAGS = --with-systemdsystemunitdir=$$dc_install_base/$(systemdsystemunitdir) \ --with-bash-completion-dir=$$dc_install_base/$(bashcompletiondir) SUBDIRS = c_binding python_binding plugin doc tools daemon packaging \ config $(am__append_1) EXTRA_DIST = \ libstoragemgmt.pc.in \ libstoragemgmt.pc \ autogen.sh pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = libstoragemgmt.pc MAINTAINERCLEANFILES = .git-module-status all: config.h $(MAKE) $(AM_MAKEFLAGS) all-recursive .SUFFIXES: am--refresh: Makefile @: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ echo ' cd $(srcdir) && $(AUTOMAKE) --gnu'; \ $(am__cd) $(srcdir) && $(AUTOMAKE) --gnu \ && exit 0; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ echo ' $(SHELL) ./config.status'; \ $(SHELL) ./config.status;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) $(SHELL) ./config.status --recheck $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) $(am__cd) $(srcdir) && $(AUTOCONF) $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) $(am__aclocal_m4_deps): config.h: stamp-h1 @if test ! -f $@; then rm -f stamp-h1; else :; fi @if test ! 
-f $@; then $(MAKE) $(AM_MAKEFLAGS) stamp-h1; else :; fi stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status @rm -f stamp-h1 cd $(top_builddir) && $(SHELL) ./config.status config.h $(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) rm -f stamp-h1 touch $@ distclean-hdr: -rm -f config.h stamp-h1 libstoragemgmt.pc: $(top_builddir)/config.status $(srcdir)/libstoragemgmt.pc.in cd $(top_builddir) && $(SHELL) ./config.status $@ python_binding/lsm/version.py: $(top_builddir)/config.status $(top_srcdir)/python_binding/lsm/version.py.in cd $(top_builddir) && $(SHELL) ./config.status $@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs distclean-libtool: -rm -f libtool config.lt install-pkgconfigDATA: $(pkgconfig_DATA) @$(NORMAL_INSTALL) @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pkgconfigdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pkgconfigdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgconfigdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgconfigdir)" || exit $$?; \ done uninstall-pkgconfigDATA: @$(NORMAL_UNINSTALL) @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pkgconfigdir)'; $(am__uninstall_files_from_dir) # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. 
$(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! -f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscope: cscope.files test ! 
-s cscope.files \ || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS) clean-cscope: -rm -f cscope.files cscope.files: clean-cscope cscopelist cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags -rm -f cscope.out cscope.in.out cscope.po.out cscope.files distdir: $(DISTFILES) $(am__remove_distdir) test -d "$(distdir)" || mkdir "$(distdir)" @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done -test -n "$(am__skip_mode_fix)" \ || find "$(distdir)" -type d ! -perm -755 \ -exec chmod u+rwx,go+rx {} \; -o \ ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ || chmod -R a+r "$(distdir)" dist-gzip: distdir tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz $(am__post_remove_distdir) dist-bzip2: distdir tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 $(am__post_remove_distdir) dist-lzip: distdir tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz $(am__post_remove_distdir) dist-xz: distdir tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz $(am__post_remove_distdir) dist-tarZ: distdir tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z $(am__post_remove_distdir) dist-shar: distdir shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz $(am__post_remove_distdir) dist-zip: distdir -rm -f $(distdir).zip zip -rq $(distdir).zip $(distdir) $(am__post_remove_distdir) dist dist-all: $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:' $(am__post_remove_distdir) # This target untars the dist file and tries a VPATH configuration. Then # it guarantees that the distribution is self-contained by making another # tarfile. distcheck: dist case '$(DIST_ARCHIVES)' in \ *.tar.gz*) \ GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\ *.tar.bz2*) \ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ *.tar.lz*) \ lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ *.tar.xz*) \ xz -dc $(distdir).tar.xz | $(am__untar) ;;\ *.tar.Z*) \ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ *.shar.gz*) \ GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\ *.zip*) \ unzip $(distdir).zip ;;\ esac chmod -R a-w $(distdir) chmod u+w $(distdir) mkdir $(distdir)/_build $(distdir)/_inst chmod a-w $(distdir) test -d $(distdir)/_build || exit 0; \ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ && am__cwd=`pwd` \ && $(am__cd) $(distdir)/_build \ && ../configure --srcdir=.. 
--prefix="$$dc_install_base" \ $(AM_DISTCHECK_CONFIGURE_FLAGS) \ $(DISTCHECK_CONFIGURE_FLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) dvi \ && $(MAKE) $(AM_MAKEFLAGS) check \ && $(MAKE) $(AM_MAKEFLAGS) install \ && $(MAKE) $(AM_MAKEFLAGS) installcheck \ && $(MAKE) $(AM_MAKEFLAGS) uninstall \ && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ distuninstallcheck \ && chmod -R a-w "$$dc_install_base" \ && ({ \ (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ } || { rm -rf "$$dc_destdir"; exit 1; }) \ && rm -rf "$$dc_destdir" \ && $(MAKE) $(AM_MAKEFLAGS) dist \ && rm -rf $(DIST_ARCHIVES) \ && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ && cd "$$am__cwd" \ || exit 1 $(am__post_remove_distdir) @(echo "$(distdir) archives ready for distribution: "; \ list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' distuninstallcheck: @test -n '$(distuninstallcheck_dir)' || { \ echo 'ERROR: trying to run $@ with an empty' \ '$$(distuninstallcheck_dir)' >&2; \ exit 1; \ }; \ $(am__cd) '$(distuninstallcheck_dir)' || { \ echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ exit 1; \ }; \ test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left after uninstall:" ; \ if test -n "$(DESTDIR)"; then \ echo " (check DESTDIR support)"; \ fi ; \ $(distuninstallcheck_listfiles) ; \ exit 1; } >&2 distcleancheck: distclean @if test '$(srcdir)' = . 
; then \ echo "ERROR: distcleancheck can only run from a VPATH build" ; \ exit 1 ; \ fi @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ || { echo "ERROR: files left in build directory after distclean:" ; \ $(distcleancheck_listfiles) ; \ exit 1; } >&2 check-am: all-am check: check-recursive all-am: Makefile $(DATA) config.h installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(pkgconfigdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
-test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -f Makefile distclean-am: clean-am distclean-generic distclean-hdr \ distclean-libtool distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-pkgconfigDATA install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -rf $(top_srcdir)/autom4te.cache -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-pkgconfigDATA .MAKE: $(am__recursive_targets) all install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ am--refresh check check-am clean clean-cscope clean-generic \ clean-libtool cscope cscopelist-am ctags ctags-am dist \ dist-all dist-bzip2 dist-gzip dist-lzip dist-shar dist-tarZ \ dist-xz dist-zip distcheck distclean distclean-generic \ distclean-hdr distclean-libtool distclean-tags distcleancheck \ distdir distuninstallcheck dvi dvi-am html html-am info \ info-am install install-am install-data install-data-am \ install-dvi install-dvi-am install-exec install-exec-am \ install-html install-html-am install-info install-info-am \ install-man install-pdf install-pdf-am install-pkgconfigDATA \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs installdirs-am maintainer-clean \ maintainer-clean-generic mostlyclean 
mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \ uninstall-am uninstall-pkgconfigDATA #Source code documentation docs: doxygen doc/doxygen.conf rpm: clean @(unset CDPATH ; $(MAKE) dist && rpmbuild -ta $(distdir).tar.gz) # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/plugin/0000775000175000017500000000000012542455463013050 500000000000000libstoragemgmt-1.2.3/plugin/targetd/0000775000175000017500000000000012542455463014502 500000000000000libstoragemgmt-1.2.3/plugin/targetd/targetd_lsmplugin0000775000175000017500000000240712537737032020076 00000000000000#!/usr/bin/env python2 # Copyright (C) 2011-2013 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: Andy Grover import sys import syslog import traceback try: from lsm import PluginRunner from lsm.plugin.targetd.targetd import TargetdStorage if __name__ == '__main__': PluginRunner(TargetdStorage, sys.argv).run() except Exception: #This should be quite rare, but when it does happen this is pretty #key in understanding what happened, especially when it happens when #running from the daemon. 
msg = str(traceback.format_exc()) syslog.syslog(syslog.LOG_ERR, msg) sys.stderr.write(msg) sys.exit(1) libstoragemgmt-1.2.3/plugin/targetd/targetd.py0000664000175000017500000010443012537761730016431 00000000000000# Copyright (C) 2011-2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: Andy Grover # Gris Ge import copy from lsm import (Pool, Volume, System, Capabilities, IStorageAreaNetwork, INfs, FileSystem, FsSnapshot, NfsExport, LsmError, ErrorNumber, uri_parse, md5, VERSION, common_urllib2_error_handler, search_property, AccessGroup) import urllib2 import json import time import urlparse import socket import re DEFAULT_USER = "admin" DEFAULT_PORT = 18700 PATH = "/targetrpc" # Current sector size in liblvm _LVM_SECTOR_SIZE = 512 def handle_errors(method): def target_wrapper(*args, **kwargs): try: return method(*args, **kwargs) except TargetdError as te: raise LsmError(ErrorNumber.PLUGIN_BUG, "Got error %d from targetd: %s" % (te.errno, te.reason)) except LsmError: raise except Exception as e: common_urllib2_error_handler(e) return target_wrapper class TargetdError(Exception): VOLUME_MASKED = 303 INVALID_METHOD = 32601 INVALID_ARGUMENT = 32602 NAME_CONFLICT = 50 EXISTS_INITIATOR = 52 NO_FREE_HOST_LUN_ID = 1000 EMPTY_ACCESS_GROUP = 511 def __init__(self, errno, reason, *args, **kwargs): Exception.__init__(self, *args, **kwargs) self.errno = int(errno) self.reason = reason class 
TargetdStorage(IStorageAreaNetwork, INfs): _FAKE_AG_PREFIX = 'init.' _MAX_H_LUN_ID = 255 def __init__(self): self.uri = None self.password = None self.tmo = 0 self.rpc_id = 1 self.host_with_port = None self.scheme = None self.url = None self.headers = None self.system = System("targetd", "targetd storage appliance", System.STATUS_UNKNOWN, '') @handle_errors def plugin_register(self, uri, password, timeout, flags=0): self.uri = uri_parse(uri) self.password = password self.tmo = timeout self._flag_ag_support = True user = self.uri.get('username', DEFAULT_USER) port = self.uri.get('port', DEFAULT_PORT) self.host_with_port = "%s:%s" % (self.uri['host'], port) if self.uri['scheme'].lower() == 'targetd+ssl': self.scheme = 'https' else: self.scheme = 'http' self.url = urlparse.urlunsplit( (self.scheme, self.host_with_port, PATH, None, None)) auth = ('%s:%s' % (user, self.password)).encode('base64')[:-1] self.headers = {'Content-Type': 'application/json', 'Authorization': 'Basic %s' % (auth,)} try: self._jsonrequest('access_group_list') except TargetdError as te: if te.errno == TargetdError.INVALID_METHOD: self._flag_ag_support = False else: raise @handle_errors def time_out_set(self, ms, flags=0): self.tmo = ms @handle_errors def time_out_get(self, flags=0): return self.tmo @handle_errors def plugin_unregister(self, flags=0): pass @handle_errors def capabilities(self, system, flags=0): cap = Capabilities() cap.set(Capabilities.VOLUMES) cap.set(Capabilities.VOLUME_CREATE) cap.set(Capabilities.VOLUME_REPLICATE) cap.set(Capabilities.VOLUME_REPLICATE_COPY) cap.set(Capabilities.VOLUME_DELETE) cap.set(Capabilities.VOLUME_MASK) cap.set(Capabilities.VOLUME_UNMASK) cap.set(Capabilities.FS) cap.set(Capabilities.FS_CREATE) cap.set(Capabilities.FS_DELETE) cap.set(Capabilities.FS_CLONE) cap.set(Capabilities.FS_SNAPSHOT_CREATE) cap.set(Capabilities.FS_SNAPSHOT_DELETE) cap.set(Capabilities.FS_SNAPSHOTS) cap.set(Capabilities.EXPORT_AUTH) cap.set(Capabilities.EXPORTS) 
cap.set(Capabilities.EXPORT_FS) cap.set(Capabilities.EXPORT_REMOVE) cap.set(Capabilities.ACCESS_GROUPS) cap.set(Capabilities.ACCESS_GROUPS_GRANTED_TO_VOLUME) cap.set(Capabilities.VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP) cap.set(Capabilities.VOLUME_ISCSI_CHAP_AUTHENTICATION) if self._flag_ag_support: cap.set(Capabilities.ACCESS_GROUP_CREATE_ISCSI_IQN) cap.set(Capabilities.ACCESS_GROUP_INITIATOR_ADD_ISCSI_IQN) cap.set(Capabilities.ACCESS_GROUP_INITIATOR_DELETE) cap.set(Capabilities.ACCESS_GROUP_DELETE) return cap @handle_errors def plugin_info(self, flags=0): return "Linux LIO target support", VERSION @handle_errors def systems(self, flags=0): # verify we're online self._jsonrequest("pool_list") return [self.system] @handle_errors def job_status(self, job_id, flags=0): raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") @handle_errors def job_free(self, job_id, flags=0): raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") @staticmethod def _uuid_to_vpd83(uuid): """ Convert LVM UUID to VPD 83 Device ID. LIO kernel module(target_core_mod.ko) does not expose VPD83 via ConfigFs. Targetd does not expose VPD83 via its API. Hence we have to do the convention here base on kernel code. """ # NAA IEEE Registered Extended Identifier/Designator. # SPC-4 rev37a 7.8.6.6.5 vpd83 = '6' # Use OpenFabrics IEEE Company ID: 00 14 05 # https://standards.ieee.org/develop/regauth/oui/oui.txt vpd83 += '001405' # Take all [a-f0-9] digits from UUID for VENDOR_SPECIFIC_IDENTIFIER # and VENDOR_SPECIFIC_IDENTIFIER_EXTENTION vpd83 += re.sub('[^a-f0-9]', '', uuid.lower())[:25] # Fill up with zero. 
vpd83 += '0' * (32 - len(vpd83)) return vpd83 @handle_errors def volumes(self, search_key=None, search_value=None, flags=0): volumes = [] for p_name in (p['name'] for p in self._jsonrequest("pool_list") if p['type'] == 'block'): for vol in self._jsonrequest("vol_list", dict(pool=p_name)): vpd83 = TargetdStorage._uuid_to_vpd83(vol['uuid']) volumes.append( Volume(vol['uuid'], vol['name'], vpd83, 512, long(vol['size'] / 512), Volume.ADMIN_STATE_ENABLED, self.system.id, p_name)) return search_property(volumes, search_key, search_value) @handle_errors def pools(self, search_key=None, search_value=None, flags=0): pools = [] for pool in self._jsonrequest("pool_list"): if pool['name'].startswith('/'): et = Pool.ELEMENT_TYPE_FS else: et = Pool.ELEMENT_TYPE_VOLUME pools.append(Pool(pool['name'], pool['name'], et, 0, pool['size'], pool['free_size'], Pool.STATUS_UNKNOWN, '', 'targetd')) return search_property(pools, search_key, search_value) @staticmethod def _tgt_ag_to_lsm(tgt_ag, sys_id): return AccessGroup( tgt_ag['name'], tgt_ag['name'], tgt_ag['init_ids'], AccessGroup.INIT_TYPE_ISCSI_IQN, sys_id) @staticmethod def _tgt_init_to_lsm(tgt_init, sys_id): return AccessGroup( "%s%s" % ( TargetdStorage._FAKE_AG_PREFIX, md5(tgt_init['init_id'])), 'N/A', [tgt_init['init_id']], AccessGroup.INIT_TYPE_ISCSI_IQN, sys_id) @handle_errors def access_groups(self, search_key=None, search_value=None, flags=0): rc_lsm_ags = [] # For backward compatibility if self._flag_ag_support is True: tgt_inits = self._jsonrequest( 'initiator_list', {'standalone_only': True}) else: tgt_inits = list( {'init_id': x} for x in set( i['initiator_wwn'] for i in self._jsonrequest("export_list"))) rc_lsm_ags.extend( list( TargetdStorage._tgt_init_to_lsm(i, self.system.id) for i in tgt_inits)) if self._flag_ag_support is True: for tgt_ag in self._jsonrequest('access_group_list'): rc_lsm_ags.append( TargetdStorage._tgt_ag_to_lsm( tgt_ag, self.system.id)) return search_property(rc_lsm_ags, search_key, search_value) 
def _lsm_ag_of_id(self, ag_id, lsm_error_obj=None): """ Raise provided error if defined when not found. Return lsm.AccessGroup if found. """ lsm_ags = self.access_groups() for lsm_ag in lsm_ags: if lsm_ag.id == ag_id: return lsm_ag if lsm_error_obj: raise lsm_error_obj @handle_errors def access_group_create(self, name, init_id, init_type, system, flags=0): if system.id != self.system.id: raise LsmError( ErrorNumber.NOT_FOUND_SYSTEM, "System %s not found" % system.id) if self._flag_ag_support is False: raise LsmError( ErrorNumber.NO_SUPPORT, "Please upgrade your targetd package to support " "access_group_create()") if init_type != AccessGroup.INIT_TYPE_ISCSI_IQN: raise LsmError(ErrorNumber.NO_SUPPORT, "Only iSCSI yet") try: self._jsonrequest( "access_group_create", dict(ag_name=name, init_id=init_id, init_type='iscsi')) except TargetdError as tgt_error: if tgt_error.errno == TargetdError.EXISTS_INITIATOR: raise LsmError( ErrorNumber.EXISTS_INITIATOR, "Initiator is already used by other access group") elif tgt_error.errno == TargetdError.NAME_CONFLICT: raise LsmError( ErrorNumber.NAME_CONFLICT, "Requested access group name is already used by other " "access group") elif tgt_error.errno == TargetdError.INVALID_ARGUMENT: raise LsmError( ErrorNumber.INVALID_ARGUMENT, str(tgt_error)) else: raise return self._lsm_ag_of_id( name, LsmError( ErrorNumber.PLUGIN_BUG, "access_group_create(): Failed to find the newly created " "access group")) @handle_errors def access_group_initiator_add(self, access_group, init_id, init_type, flags=0): if init_type != AccessGroup.INIT_TYPE_ISCSI_IQN: raise LsmError( ErrorNumber.NO_SUPPORT, "Targetd only support iscsi") lsm_ag = self._lsm_ag_of_id( access_group.name, LsmError( ErrorNumber.NOT_FOUND_ACCESS_GROUP, "Access group not found")) # Pre-check for NO_STATE_CHANGE error as targetd silently pass # if initiator is already in requested access group. 
if init_id in lsm_ag.init_ids: raise LsmError( ErrorNumber.NO_STATE_CHANGE, "Requested init_id is already in defined access group") try: self._jsonrequest( "access_group_init_add", dict( ag_name=access_group.name, init_id=init_id, init_type='iscsi')) except TargetdError as tgt_error: if tgt_error.errno == TargetdError.EXISTS_INITIATOR: raise LsmError( ErrorNumber.EXISTS_INITIATOR, "Initiator is already used by other access group") else: raise return self._lsm_ag_of_id( access_group.name, LsmError( ErrorNumber.PLUGIN_BUG, "access_group_initiator_add(): " "Failed to find the updated access group")) @handle_errors def access_group_initiator_delete(self, access_group, init_id, init_type, flags=0): if init_type != AccessGroup.INIT_TYPE_ISCSI_IQN: raise LsmError( ErrorNumber.NO_SUPPORT, "Targetd only support iscsi") # Pre-check for NO_STATE_CHANGE as targetd sliently return # when init_id not in requested access_group. lsm_ag = self._lsm_ag_of_id( access_group.name, LsmError( ErrorNumber.NOT_FOUND_ACCESS_GROUP, "Access group not found")) if init_id not in lsm_ag.init_ids: raise LsmError( ErrorNumber.NO_STATE_CHANGE, "Requested initiator is not in defined access group") if len(lsm_ag.init_ids) == 1: raise LsmError( ErrorNumber.LAST_INIT_IN_ACCESS_GROUP, "Refused to remove the last initiator from access group") self._jsonrequest( "access_group_init_del", dict( ag_name=access_group.name, init_id=init_id, init_type='iscsi')) return self._lsm_ag_of_id( access_group.name, LsmError( ErrorNumber.PLUGIN_BUG, "access_group_initiator_delete(): " "Failed to find the updated access group")) @handle_errors def access_group_delete(self, access_group, flags=0): if access_group.id.startswith(TargetdStorage._FAKE_AG_PREFIX): raise LsmError( ErrorNumber.NO_SUPPORT, "Cannot delete old initiator simulated access group, " "they will be automatically deleted when no volume masked to") if self._flag_ag_support is False: raise LsmError( ErrorNumber.NO_SUPPORT, "Please upgrade your targetd 
package to support " "access_group_delete()") self._lsm_ag_of_id( access_group.id, LsmError( ErrorNumber.NOT_FOUND_ACCESS_GROUP, "Access group not found")) if list(m for m in self._tgt_masks() if m['ag_id'] == access_group.id): raise LsmError( ErrorNumber.IS_MASKED, "Cannot delete access group which has volume masked to") self._jsonrequest( "access_group_destroy", {'ag_name': access_group.name}) return None def _tgt_masks(self): """ Return a list of tgt_mask: { 'pool_name': pool_name, 'vol_name': vol_name, 'ag_id': lsm_ag.id, 'h_lun_id': h_lun_id, } """ tgt_masks = [] for tgt_exp in self._jsonrequest("export_list"): tgt_masks.append({ 'ag_id': "%s%s" % ( TargetdStorage._FAKE_AG_PREFIX, md5(tgt_exp['initiator_wwn'])), 'vol_name': tgt_exp['vol_name'], 'pool_name': tgt_exp['pool'], 'h_lun_id': tgt_exp['lun'], }) if self._flag_ag_support: for tgt_ag_map in self._jsonrequest("access_group_map_list"): tgt_masks.append({ 'ag_id': tgt_ag_map['ag_name'], 'vol_name': tgt_ag_map['vol_name'], 'pool_name': tgt_ag_map['pool_name'], 'h_lun_id': tgt_ag_map['h_lun_id'], }) return tgt_masks def _is_masked(self, ag_id, pool_name, vol_name, tgt_masks=None): """ Check whether volume is masked to certain access group. 
Return True or False """ if tgt_masks is None: tgt_masks = self._tgt_masks() return list( m for m in tgt_masks if (m['vol_name'] == vol_name and m['pool_name'] == pool_name and m['ag_id'] == ag_id) ) != [] def _lsm_vol_of_id(self, vol_id, error=None): try: return list(v for v in self.volumes() if v.id == vol_id)[0] except IndexError: if error: raise error else: return None @handle_errors def volume_mask(self, access_group, volume, flags=0): self._lsm_ag_of_id( access_group.id, LsmError( ErrorNumber.NOT_FOUND_ACCESS_GROUP, "Access group not found")) self._lsm_vol_of_id( volume.id, LsmError( ErrorNumber.NOT_FOUND_VOLUME, "Volume not found")) tgt_masks = self._tgt_masks() if self._is_masked( access_group.id, volume.pool_id, volume.name, tgt_masks): raise LsmError( ErrorNumber.NO_STATE_CHANGE, "Volume is already masked to requested access group") if access_group.id.startswith(TargetdStorage._FAKE_AG_PREFIX): free_h_lun_ids = ( set(range(TargetdStorage._MAX_H_LUN_ID + 1)) - set([m['h_lun_id'] for m in tgt_masks])) if len(free_h_lun_ids) == 0: # TODO(Gris Ge): Add SYSTEM_LIMIT error into API raise LsmError( ErrorNumber.PLUGIN_BUG, "System limit: targetd only allows %s LUN masked" % TargetdStorage._MAX_H_LUN_ID) h_lun_id = free_h_lun_ids.pop() self._jsonrequest( "export_create", { 'pool': volume.pool_id, 'vol': volume.name, 'initiator_wwn': access_group.init_ids[0], 'lun': h_lun_id }) else: try: self._jsonrequest( 'access_group_map_create', { 'pool_name': volume.pool_id, 'vol_name': volume.name, 'ag_name': access_group.id, }) except TargetdError as tgt_error: if tgt_error.errno == TargetdError.NO_FREE_HOST_LUN_ID: # TODO(Gris Ge): Add SYSTEM_LIMIT error into API raise LsmError( ErrorNumber.PLUGIN_BUG, "System limit: targetd only allows %s LUN masked" % TargetdStorage._MAX_H_LUN_ID) elif tgt_error.errno == TargetdError.EMPTY_ACCESS_GROUP: raise LsmError( ErrorNumber.NOT_FOUND_ACCESS_GROUP, "Access group not found") else: raise return None @handle_errors def 
volume_unmask(self, volume, access_group, flags=0): self._lsm_ag_of_id( access_group.id, LsmError( ErrorNumber.NOT_FOUND_ACCESS_GROUP, "Access group not found")) self._lsm_vol_of_id( volume.id, LsmError( ErrorNumber.NOT_FOUND_VOLUME, "Volume not found")) # Pre-check if already unmasked if not self._is_masked(access_group.id, volume.pool_id, volume.name): raise LsmError(ErrorNumber.NO_STATE_CHANGE, "Volume is not masked to requested access group") if access_group.id.startswith(TargetdStorage._FAKE_AG_PREFIX): self._jsonrequest("export_destroy", dict(pool=volume.pool_id, vol=volume.name, initiator_wwn=access_group.init_ids[0])) else: self._jsonrequest( "access_group_map_destroy", { 'pool_name': volume.pool_id, 'vol_name': volume.name, 'ag_name': access_group.id, }) return None @handle_errors def volumes_accessible_by_access_group(self, access_group, flags=0): tgt_masks = self._tgt_masks() vol_infos = list( [m['vol_name'], m['pool_name']] for m in tgt_masks if m['ag_id'] == access_group.id) if len(vol_infos) == 0: return [] rc_lsm_vols = [] return list( lsm_vol for lsm_vol in self.volumes(flags=flags) if [lsm_vol.name, lsm_vol.pool_id] in vol_infos) @handle_errors def access_groups_granted_to_volume(self, volume, flags=0): tgt_masks = self._tgt_masks() ag_ids = list( m['ag_id'] for m in tgt_masks if (m['vol_name'] == volume.name and m['pool_name'] == volume.pool_id)) lsm_ags = self.access_groups(flags=flags) return [x for x in lsm_ags if x.id in ag_ids] def _get_volume(self, pool_id, volume_name): vol = [v for v in self._jsonrequest("vol_list", dict(pool=pool_id)) if v['name'] == volume_name][0] vpd83 = TargetdStorage._uuid_to_vpd83(vol['uuid']) return Volume(vol['uuid'], vol['name'], vpd83, 512, vol['size'] / 512, Volume.ADMIN_STATE_ENABLED, self.system.id, pool_id) def _get_fs(self, pool_id, fs_name): fs = self.fs() for f in fs: if f.name == fs_name and f.pool_id == pool_id: return f return None def _get_ss(self, fs, ss_name): ss = self.fs_snapshots(fs) for s in ss: 
if s.name == ss_name: return s return None @handle_errors def volume_create(self, pool, volume_name, size_bytes, provisioning, flags=0): if provisioning != Volume.PROVISION_DEFAULT: raise LsmError(ErrorNumber.INVALID_ARGUMENT, "Unsupported provisioning") # Make sure size_bytes round up with _LVM_SECTOR_SIZE if size_bytes: remainder = size_bytes % _LVM_SECTOR_SIZE if remainder: size_bytes = size_bytes + _LVM_SECTOR_SIZE - remainder else: size_bytes = _LVM_SECTOR_SIZE self._jsonrequest("vol_create", dict(pool=pool.id, name=volume_name, size=size_bytes)) return None, self._get_volume(pool.id, volume_name) @handle_errors def volume_delete(self, volume, flags=0): try: self._jsonrequest("vol_destroy", dict(pool=volume.pool_id, name=volume.name)) except TargetdError as te: if te.errno == TargetdError.VOLUME_MASKED: raise LsmError(ErrorNumber.IS_MASKED, "Volume is masked to access group") raise @handle_errors def volume_replicate(self, pool, rep_type, volume_src, name, flags=0): if rep_type != Volume.REPLICATE_COPY: raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported") #pool id is optional, use volume src as default pool_id = volume_src.pool_id if pool: pool_id = pool.id self._jsonrequest("vol_copy", dict(pool=pool_id, vol_orig=volume_src.name, vol_new=name)) return None, self._get_volume(pool_id, name) @handle_errors def iscsi_chap_auth(self, init_id, in_user, in_password, out_user, out_password, flags=0): self._jsonrequest("initiator_set_auth", dict(initiator_wwn=init_id, in_user=in_user, in_pass=in_password, out_user=out_user, out_pass=out_password)) return None @handle_errors def fs(self, search_key=None, search_value=None, flags=0): rc = [] for fs in self._jsonrequest("fs_list"): #self, id, name, total_space, free_space, pool_id, system_id rc.append(FileSystem(fs['uuid'], fs['name'], fs['total_space'], fs['free_space'], fs['pool'], self.system.id)) return search_property(rc, search_key, search_value) @handle_errors def fs_delete(self, fs, flags=0): 
self._jsonrequest("fs_destroy", dict(uuid=fs.id)) @handle_errors def fs_create(self, pool, name, size_bytes, flags=0): self._jsonrequest("fs_create", dict(pool_name=pool.id, name=name, size_bytes=size_bytes)) return None, self._get_fs(pool.name, name) @handle_errors def fs_clone(self, src_fs, dest_fs_name, snapshot=None, flags=0): ss_id = None if snapshot: ss_id = snapshot.id self._jsonrequest("fs_clone", dict(fs_uuid=src_fs.id, dest_fs_name=dest_fs_name, snapshot_id=ss_id)) return None, self._get_fs(src_fs.pool_id, dest_fs_name) @handle_errors def fs_snapshots(self, fs, flags=0): rc = [] for ss in self._jsonrequest("ss_list", dict(fs_uuid=fs.id)): #id, name, timestamp rc.append(FsSnapshot(ss['uuid'], ss['name'], ss['timestamp'])) return rc @handle_errors def fs_snapshot_create(self, fs, snapshot_name, flags=0): self._jsonrequest("fs_snapshot", dict(fs_uuid=fs.id, dest_ss_name=snapshot_name)) return None, self._get_ss(fs, snapshot_name) @handle_errors def fs_snapshot_delete(self, fs, snapshot, flags=0): self._jsonrequest("fs_snapshot_delete", dict(fs_uuid=fs.id, ss_uuid=snapshot.id)) @handle_errors def export_auth(self, flags=0): exports = self._jsonrequest("nfs_export_auth_list") return exports @staticmethod def _get_value(options, key): for o in options: if '=' in o: k, v = o.split('=') if k == key: return v return None @staticmethod def _option_string(nfs_options): cpy = copy.copy(nfs_options) if 'ro' in cpy: cpy.remove('ro') if 'rw' in cpy: cpy.remove('rw') if 'no_root_squash' in cpy: cpy.remove('no_root_squash') if 'root_squash' in cpy: cpy.remove('root_squash') cpy.sort() s = ','.join(cpy) return s @staticmethod def _calculate_export_md5(export_path, options): opts = TargetdStorage._option_string(options) return md5(export_path + opts) @handle_errors def exports(self, search_key=None, search_value=None, flags=0): tmp_exports = {} exports = [] fs_full_paths = {} all_nfs_exports = self._jsonrequest("nfs_export_list") nfs_exports = [] #Remove those that are not 
of FS origin fs_list = self._jsonrequest("fs_list") for f in fs_list: fs_full_paths[f['full_path']] = f for export in all_nfs_exports: if export['path'] in fs_full_paths: nfs_exports.append(export) #Collect like exports to minimize results for export in nfs_exports: key = export['path'] + \ TargetdStorage._option_string(export['options']) if key in tmp_exports: tmp_exports[key].append(export) else: tmp_exports[key] = [export] #Walk through the options for le in tmp_exports.values(): export_id = "" root = [] rw = [] ro = [] sec = None anonuid = NfsExport.ANON_UID_GID_NA anongid = NfsExport.ANON_UID_GID_NA options = None for export in le: host = export['host'] export_id += host export_id += export['path'] export_id += fs_full_paths[export['path']]['uuid'] options = export['options'] if 'rw' in options: rw.append(host) if 'ro' in options: ro.append(host) sec = TargetdStorage._get_value(options, 'sec') if sec is None: sec = 'sys' if 'no_root_squash' in options: root.append(host) uid = TargetdStorage._get_value(options, 'anonuid') if uid is not None: anonuid = uid gid = TargetdStorage._get_value(options, 'anongid') if gid is not None: anongid = gid exports.append( NfsExport(TargetdStorage._calculate_export_md5(export['path'], options), fs_full_paths[export['path']]['uuid'], export['path'], sec, root, rw, ro, anonuid, anongid, TargetdStorage._option_string(options))) return search_property(exports, search_key, search_value) def _get_fs_path(self, fs_id): for fs in self._jsonrequest("fs_list"): if fs_id == fs['uuid']: return fs['full_path'] return None @handle_errors def export_fs( self, fs_id, export_path, root_list, rw_list, ro_list, anon_uid=NfsExport.ANON_UID_GID_NA, anon_gid=NfsExport.ANON_UID_GID_NA, auth_type=None, options=None, flags=0): if export_path is not None: raise LsmError(ErrorNumber.INVALID_ARGUMENT, 'export_path required to be None') base_opts = [] if anon_uid is not None: base_opts.append('anonuid=%s' % str(anon_uid)) if anon_gid is not None: 
base_opts.append('anongid=%s' % str(anon_gid)) if auth_type is not None: base_opts.append('sec=%s' % str(auth_type)) fs_path = self._get_fs_path(fs_id) if fs_path is None: raise LsmError(ErrorNumber.NOT_FOUND_FS, "File system not found") for host in rw_list: tmp_opts = copy.copy(base_opts) if host in root_list: tmp_opts.append('no_root_squash') tmp_opts.append('rw') self._jsonrequest("nfs_export_add", dict(host=host, path=fs_path, export_path=None, options=tmp_opts)) for host in ro_list: tmp_opts = copy.copy(base_opts) if host in root_list: tmp_opts.append('no_root_squash') tmp_opts.append('ro') self._jsonrequest("nfs_export_add", dict(host=host, path=fs_path, export_path=None, options=tmp_opts)) #Kind of a pain to determine which export was newly created as it #could get merged into an existing record, doh! #Make sure fs_id's match and that one of the hosts is in the #record. exports = self.exports() h = [] h.extend(rw_list) h.extend(ro_list) for host in exports: if host.fs_id == fs_id: l = [] l.extend(host.ro) l.extend(host.rw) for host_entry in h: if host_entry in l: return host raise LsmError(ErrorNumber.PLUGIN_BUG, "Failed to create export") @handle_errors def export_remove(self, export, flags=0): for host in export.rw: params = dict(host=host, path=export.export_path) self._jsonrequest("nfs_export_remove", params) for host in export.ro: params = dict(host=host, path=export.export_path) self._jsonrequest("nfs_export_remove", params) def _jsonrequest(self, method, params=None): data = json.dumps(dict(id=self.rpc_id, method=method, params=params, jsonrpc="2.0")) self.rpc_id += 1 try: request = urllib2.Request(self.url, data, self.headers) response_obj = urllib2.urlopen(request) except socket.error: raise LsmError(ErrorNumber.NETWORK_ERROR, "Unable to connect to targetd, uri right?") response_data = response_obj.read() response = json.loads(response_data) if response.get('error', None) is None: return response.get('result') else: if response['error']['code'] <= 
0: #error_text = "%s:%s" % (str(response['error']['code']), # response['error'].get('message', '')) raise TargetdError(abs(int(response['error']['code'])), response['error'].get('message', '')) else: # +code is async execution id #Async completion, polling for results async_code = response['error']['code'] while True: time.sleep(1) results = self._jsonrequest('async_list') status = results.get(str(async_code), None) if status: if status[0]: raise LsmError( ErrorNumber.PLUGIN_BUG, "%d has error %d" % (async_code, status[0])) libstoragemgmt-1.2.3/plugin/targetd/__init__.py0000664000175000017500000000000012537546123016517 00000000000000libstoragemgmt-1.2.3/plugin/simc/0000775000175000017500000000000012542455463014003 500000000000000libstoragemgmt-1.2.3/plugin/simc/Makefile.am0000664000175000017500000000042512537546123015756 00000000000000AM_CPPFLAGS = \ -I$(top_srcdir)/c_binding/include \ -I@srcdir@/c_binding/include \ $(DEFS) $(LIBGLIB_CFLAGS) bin_PROGRAMS = simc_lsmplugin simc_lsmplugin_LDADD = \ ../../c_binding/libstoragemgmt.la \ $(LIBGLIB_LIBS) $(SSL_LIBS) simc_lsmplugin_SOURCES = simc_lsmplugin.c libstoragemgmt-1.2.3/plugin/simc/Makefile.in0000664000175000017500000005106612542455445016000 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ bin_PROGRAMS = simc_lsmplugin$(EXEEXT) subdir = plugin/simc DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(top_srcdir)/build-aux/depcomp ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ 
$(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__installdirs = "$(DESTDIR)$(bindir)" PROGRAMS = $(bin_PROGRAMS) am_simc_lsmplugin_OBJECTS = simc_lsmplugin.$(OBJEXT) simc_lsmplugin_OBJECTS = $(am_simc_lsmplugin_OBJECTS) am__DEPENDENCIES_1 = simc_lsmplugin_DEPENDENCIES = ../../c_binding/libstoragemgmt.la \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/build-aux/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(simc_lsmplugin_SOURCES) DIST_SOURCES = $(simc_lsmplugin_SOURCES) am__can_run_installinfo = \ 
case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = 
@LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ 
datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AM_CPPFLAGS = \ -I$(top_srcdir)/c_binding/include \ -I@srcdir@/c_binding/include \ $(DEFS) $(LIBGLIB_CFLAGS) simc_lsmplugin_LDADD = \ ../../c_binding/libstoragemgmt.la \ $(LIBGLIB_LIBS) $(SSL_LIBS) simc_lsmplugin_SOURCES = simc_lsmplugin.c all: all-am .SUFFIXES: .SUFFIXES: .c .lo .o .obj $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu plugin/simc/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu plugin/simc/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) @list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do echo "$$p $$p"; done | \ sed 's/$(EXEEXT)$$//' | \ while read p p1; do if test -f $$p \ || test -f $$p1 \ ; then echo "$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n;h' \ -e 's|.*|.|' \ -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \ sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) files[d] = files[d] " " $$1; \ else { print "f", $$3 "/" $$4, $$1; } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-binPROGRAMS: @$(NORMAL_UNINSTALL) @list='$(bin_PROGRAMS)'; test -n 
"$(bindir)" || list=; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \ -e 's/$$/$(EXEEXT)/' \ `; \ test -n "$$list" || exit 0; \ echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \ cd "$(DESTDIR)$(bindir)" && rm -f $$files clean-binPROGRAMS: @list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list simc_lsmplugin$(EXEEXT): $(simc_lsmplugin_OBJECTS) $(simc_lsmplugin_DEPENDENCIES) $(EXTRA_simc_lsmplugin_DEPENDENCIES) @rm -f simc_lsmplugin$(EXEEXT) $(AM_V_CCLD)$(LINK) $(simc_lsmplugin_OBJECTS) $(simc_lsmplugin_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/simc_lsmplugin.Po@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< .c.obj: @am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ 
$(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ @am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; 
done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) installdirs: for dir in "$(DESTDIR)$(bindir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-binPROGRAMS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-binPROGRAMS .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \ clean-binPROGRAMS clean-generic clean-libtool cscopelist-am \ ctags ctags-am distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-binPROGRAMS \ install-data install-data-am install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-pdf \ install-pdf-am install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am uninstall-binPROGRAMS # Tell versions [3.59,3.63) of GNU make to 
not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/plugin/simc/simc_lsmplugin.c0000664000175000017500000023402412537737032017120 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson * */ #include #include #define _XOPEN_SOURCE #include #include #include #include #include #include #include #include #include "libstoragemgmt/libstoragemgmt_targetport.h" #ifdef __cplusplus extern "C" { #endif static char name[] = "Compiled plug-in example"; static char version[] = "0.2.0"; static char sys_id[] = "sim-01"; #define BS 512 #define MAX_SYSTEMS 1 #define MAX_FS 32 #define MAX_EXPORT 32 /** * Creates a md5 string (DO NOT FREE RETURN VALUE as the string is static) * @param data Data to generate md5 * @return Pointer to string which contains the string digest */ char *md5(const char *data) { int i = 0; MD5_CTX c; unsigned char digest[16]; static char digest_str[33]; MD5_Init(&c); MD5_Update(&c, data, strlen(data)); MD5_Final(digest, &c); for (i = 0; i < sizeof(digest); ++i) { sprintf(&digest_str[i * 2], "%02x", (unsigned int) digest[i]); } return digest_str; } /** * Removes an item from an array, shifting the elements and clearing the space * that was occupied at the end, use with caution :-) * @param array Base address for the array * @param remove_index Element index 
to remove * @param num_elems Number of elements currently in the array * @param elem_size Size of each array element */ void remove_item(void *array, int remove_index, int num_elems, size_t elem_size) { if (array && (num_elems > 0) && (remove_index < num_elems) && elem_size) { /*Are we at the end?, clear that which is at the end */ if (remove_index + 1 == num_elems) { memset(array + (elem_size * (num_elems - 1)), 0, elem_size); return; } /* Calculate the position of the one after that we want to remove */ void *src_addr = (void *) (array + ((remove_index + 1) * elem_size)); /* Calculate the destination */ void *dest_addr = (void *) (array + (remove_index * elem_size)); /* Shift the memory */ memmove(dest_addr, src_addr, ((num_elems - 1) - remove_index) * elem_size); /* Clear that which was at the end */ memset(array + (elem_size * (num_elems - 1)), 0, elem_size); } } struct allocated_volume { lsm_volume *v; lsm_pool *p; }; struct allocated_fs { lsm_fs *fs; lsm_pool *p; GHashTable *ss; GHashTable *exports; }; struct allocated_ag { lsm_access_group *ag; lsm_access_group_init_type ag_type; }; struct plugin_data { uint32_t tmo; uint32_t num_systems; lsm_system *system[MAX_SYSTEMS]; GHashTable *access_groups; GHashTable *group_grant; GHashTable *fs; GHashTable *jobs; GHashTable *pools; GHashTable *volumes; GHashTable *disks; }; struct allocated_job { int polls; lsm_data_type type; void *return_data; }; struct allocated_job *alloc_allocated_job(lsm_data_type type, void *return_data) { struct allocated_job *rc = malloc(sizeof(struct allocated_job)); if (rc) { rc->polls = 0; rc->type = type; rc->return_data = return_data; } return rc; } void free_allocated_job(void *j) { struct allocated_job *job = j; if (job && job->return_data) { switch (job->type) { case (LSM_DATA_TYPE_ACCESS_GROUP): lsm_access_group_record_free((lsm_access_group *) job->return_data); break; case (LSM_DATA_TYPE_BLOCK_RANGE): lsm_block_range_record_free((lsm_block_range *) job->return_data); break; case 
(LSM_DATA_TYPE_FS): lsm_fs_record_free((lsm_fs *) job->return_data); break; case (LSM_DATA_TYPE_NFS_EXPORT): lsm_nfs_export_record_free((lsm_nfs_export *) job->return_data); break; case (LSM_DATA_TYPE_POOL): lsm_pool_record_free((lsm_pool *) job->return_data); break; case (LSM_DATA_TYPE_SS): lsm_fs_ss_record_free((lsm_fs_ss *) job->return_data); break; case (LSM_DATA_TYPE_STRING_LIST): lsm_string_list_free((lsm_string_list *) job->return_data); break; case (LSM_DATA_TYPE_SYSTEM): lsm_system_record_free((lsm_system *) job->return_data); break; case (LSM_DATA_TYPE_VOLUME): lsm_volume_record_free((lsm_volume *) job->return_data); break; default: break; } job->return_data = NULL; } free(job); } struct allocated_ag *alloc_allocated_ag(lsm_access_group * ag, lsm_access_group_init_type i) { struct allocated_ag *aag = (struct allocated_ag *) malloc(sizeof(struct allocated_ag)); if (aag) { aag->ag = ag; aag->ag_type = i; } return aag; } void free_allocated_ag(void *v) { if (v) { struct allocated_ag *aag = (struct allocated_ag *) v; lsm_access_group_record_free(aag->ag); free(aag); } } void free_pool_record(void *p) { if (p) { lsm_pool_record_free((lsm_pool *) p); } } void free_fs_record(struct allocated_fs *fs) { if (fs) { g_hash_table_destroy(fs->ss); g_hash_table_destroy(fs->exports); lsm_fs_record_free(fs->fs); fs->p = NULL; free(fs); } } static void free_ss(void *s) { lsm_fs_ss_record_free((lsm_fs_ss *) s); } static void free_export(void *exp) { lsm_nfs_export_record_free((lsm_nfs_export *) exp); } static struct allocated_fs *alloc_fs_record() { struct allocated_fs *rc = (struct allocated_fs *) malloc(sizeof(struct allocated_fs)); if (rc) { rc->fs = NULL; rc->p = NULL; rc->ss = g_hash_table_new_full(g_str_hash, g_str_equal, free, free_ss); rc->exports = g_hash_table_new_full(g_str_hash, g_str_equal, free, free_export); if (!rc->ss || !rc->exports) { if (rc->ss) { g_hash_table_destroy(rc->ss); } if (rc->exports) { g_hash_table_destroy(rc->exports); } free(rc); rc = NULL; 
} } return rc; } static int create_job(struct plugin_data *pd, char **job, lsm_data_type t, void *new_value, void **returned_value) { static int job_num = 0; int rc = LSM_ERR_JOB_STARTED; char job_id[64]; char *key = NULL; /* Make this random */ if (0) { if (returned_value) { *returned_value = new_value; } *job = NULL; rc = LSM_ERR_OK; } else { snprintf(job_id, sizeof(job_id), "JOB_%d", job_num); job_num += 1; if (returned_value) { *returned_value = NULL; } *job = strdup(job_id); key = strdup(job_id); struct allocated_job *value = alloc_allocated_job(t, new_value); if (*job && key && value) { g_hash_table_insert(pd->jobs, key, value); } else { free(*job); *job = NULL; free(key); key = NULL; free_allocated_job(value); value = NULL; rc = LSM_ERR_NO_MEMORY; } } return rc; } static int tmo_set(lsm_plugin_ptr c, uint32_t timeout, lsm_flag flags) { struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); if (pd) { pd->tmo = timeout; return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } static int tmo_get(lsm_plugin_ptr c, uint32_t * timeout, lsm_flag flags) { struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); if (pd) { *timeout = pd->tmo; return LSM_ERR_OK; } return LSM_ERR_INVALID_ARGUMENT; } static int vol_accessible_by_ag(lsm_plugin_ptr c, lsm_access_group * group, lsm_volume ** volumes[], uint32_t * count, lsm_flag flags); static int ag_granted_to_volume(lsm_plugin_ptr c, lsm_volume * volume, lsm_access_group ** groups[], uint32_t * count, lsm_flag flags); static int cap(lsm_plugin_ptr c, lsm_system * system, lsm_storage_capabilities ** cap, lsm_flag flags) { int rc = LSM_ERR_NO_MEMORY; *cap = lsm_capability_record_alloc(NULL); if (*cap) { rc = lsm_capability_set_n(*cap, LSM_CAP_SUPPORTED, LSM_CAP_VOLUMES, LSM_CAP_VOLUME_CREATE, LSM_CAP_VOLUME_RESIZE, LSM_CAP_VOLUME_REPLICATE, LSM_CAP_VOLUME_REPLICATE_CLONE, LSM_CAP_VOLUME_REPLICATE_COPY, LSM_CAP_VOLUME_REPLICATE_MIRROR_ASYNC, LSM_CAP_VOLUME_REPLICATE_MIRROR_SYNC, 
LSM_CAP_VOLUME_COPY_RANGE_BLOCK_SIZE, LSM_CAP_VOLUME_COPY_RANGE, LSM_CAP_VOLUME_COPY_RANGE_CLONE, LSM_CAP_VOLUME_COPY_RANGE_COPY, LSM_CAP_VOLUME_DELETE, LSM_CAP_VOLUME_ENABLE, LSM_CAP_VOLUME_DISABLE, LSM_CAP_VOLUME_MASK, LSM_CAP_VOLUME_UNMASK, LSM_CAP_ACCESS_GROUPS, LSM_CAP_ACCESS_GROUP_CREATE_ISCSI_IQN, LSM_CAP_VOLUME_ISCSI_CHAP_AUTHENTICATION, LSM_CAP_ACCESS_GROUP_CREATE_WWPN, LSM_CAP_ACCESS_GROUP_INITIATOR_ADD_WWPN, LSM_CAP_ACCESS_GROUP_INITIATOR_DELETE, LSM_CAP_ACCESS_GROUP_DELETE, LSM_CAP_VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP, LSM_CAP_ACCESS_GROUPS_GRANTED_TO_VOLUME, LSM_CAP_VOLUME_CHILD_DEPENDENCY, LSM_CAP_VOLUME_CHILD_DEPENDENCY_RM, LSM_CAP_FS, LSM_CAP_FS_DELETE, LSM_CAP_FS_RESIZE, LSM_CAP_FS_CREATE, LSM_CAP_FS_CLONE, LSM_CAP_FILE_CLONE, LSM_CAP_FS_SNAPSHOTS, LSM_CAP_FS_SNAPSHOT_CREATE, LSM_CAP_FS_SNAPSHOT_DELETE, LSM_CAP_FS_SNAPSHOT_RESTORE, LSM_CAP_FS_SNAPSHOT_RESTORE_SPECIFIC_FILES, LSM_CAP_FS_CHILD_DEPENDENCY, LSM_CAP_FS_CHILD_DEPENDENCY_RM, LSM_CAP_FS_CHILD_DEPENDENCY_RM_SPECIFIC_FILES, LSM_CAP_EXPORT_AUTH, LSM_CAP_EXPORTS, LSM_CAP_EXPORT_FS, LSM_CAP_EXPORT_REMOVE, LSM_CAP_VOLUME_RAID_INFO, LSM_CAP_POOL_MEMBER_INFO, -1); if (LSM_ERR_OK != rc) { lsm_capability_record_free(*cap); *cap = NULL; } } return rc; } static int job_status(lsm_plugin_ptr c, const char *job_id, lsm_job_status * status, uint8_t * percent_complete, lsm_data_type * t, void **value, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); if (pd) { struct allocated_job *val = (struct allocated_job *) g_hash_table_lookup(pd->jobs, job_id); if (val) { *status = LSM_JOB_INPROGRESS; val->polls += 34; if ((val->polls) >= 100) { *t = val->type; *value = lsm_data_type_copy(val->type, val->return_data); *status = LSM_JOB_COMPLETE; *percent_complete = 100; } else { *percent_complete = val->polls; } } else { rc = LSM_ERR_NOT_FOUND_JOB; } } else { rc = LSM_ERR_INVALID_ARGUMENT; } return rc; } static int list_pools(lsm_plugin_ptr c, const char 
*search_key, const char *search_value, lsm_pool ** pool_array[], uint32_t * count, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); *count = g_hash_table_size(pd->pools); if (*count) { *pool_array = lsm_pool_record_array_alloc(*count); if (*pool_array) { uint32_t i = 0; char *k = NULL; lsm_pool *p = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, pd->pools); while (g_hash_table_iter_next (&iter, (gpointer) & k, (gpointer) & p)) { (*pool_array)[i] = lsm_pool_record_copy(p); if (!(*pool_array)[i]) { rc = LSM_ERR_NO_MEMORY; lsm_pool_record_array_free(*pool_array, i); *count = 0; *pool_array = NULL; break; } ++i; } } else { rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "ENOMEM"); } } if (LSM_ERR_OK == rc) { lsm_plug_pool_search_filter(search_key, search_value, *pool_array, count); } return rc; } static int list_systems(lsm_plugin_ptr c, lsm_system ** systems[], uint32_t * system_count, lsm_flag flags) { struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); if (pd) { *system_count = pd->num_systems; *systems = lsm_system_record_array_alloc(MAX_SYSTEMS); if (*systems) { (*systems)[0] = lsm_system_record_copy(pd->system[0]); if ((*systems)[0]) { return LSM_ERR_OK; } else { lsm_system_record_array_free(*systems, pd->num_systems); } } return LSM_ERR_NO_MEMORY; } else { return LSM_ERR_INVALID_ARGUMENT; } } static int job_free(lsm_plugin_ptr c, char *job_id, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); if (pd) { if (!g_hash_table_remove(pd->jobs, job_id)) { rc = LSM_ERR_NOT_FOUND_JOB; } } else { rc = LSM_ERR_INVALID_ARGUMENT; } return rc; } static struct lsm_mgmt_ops_v1 mgm_ops = { tmo_set, tmo_get, cap, job_status, job_free, list_pools, list_systems, }; static int list_volumes(lsm_plugin_ptr c, const char *search_key, const char *search_value, lsm_volume ** vols[], uint32_t * count, lsm_flag flags) { int rc = 
LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); *count = g_hash_table_size(pd->volumes); if (*count) { *vols = lsm_volume_record_array_alloc(*count); if (*vols) { uint32_t i = 0; char *k = NULL; struct allocated_volume *vol; GHashTableIter iter; g_hash_table_iter_init(&iter, pd->volumes); while (g_hash_table_iter_next (&iter, (gpointer) & k, (gpointer) & vol)) { (*vols)[i] = lsm_volume_record_copy(vol->v); if (!(*vols)[i]) { rc = LSM_ERR_NO_MEMORY; lsm_volume_record_array_free(*vols, i); *count = 0; *vols = NULL; break; } ++i; } } else { rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "ENOMEM"); } } if (LSM_ERR_OK == rc) { lsm_plug_volume_search_filter(search_key, search_value, *vols, count); } return rc; } static int list_disks(lsm_plugin_ptr c, const char *search_key, const char *search_value, lsm_disk ** disks[], uint32_t * count, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); *count = g_hash_table_size(pd->disks); if (*count) { *disks = lsm_disk_record_array_alloc(*count); if (*disks) { uint32_t i = 0; char *k = NULL; lsm_disk *disk; GHashTableIter iter; g_hash_table_iter_init(&iter, pd->disks); while (g_hash_table_iter_next (&iter, (gpointer) & k, (gpointer) & disk)) { (*disks)[i] = lsm_disk_record_copy(disk); if (!(*disks)[i]) { rc = LSM_ERR_NO_MEMORY; lsm_disk_record_array_free(*disks, i); *count = 0; *disks = NULL; break; } ++i; } } else { rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "ENOMEM"); } } if (LSM_ERR_OK == rc) { lsm_plug_disk_search_filter(search_key, search_value, *disks, count); } return rc; } static int list_targets(lsm_plugin_ptr c, const char *search_key, const char *search_value, lsm_target_port ** tp[], uint32_t * count, lsm_flag flags) { uint32_t i = 0; const char p0[] = "50:0a:09:86:99:4b:8d:c5"; const char p1[] = "50:0a:09:86:99:4b:8d:c6"; int rc = LSM_ERR_OK; *count = 5; *tp = lsm_target_port_record_array_alloc(*count); if (*tp) { 
(*tp)[0] = lsm_target_port_record_alloc("TGT_PORT_ID_01", LSM_TARGET_PORT_TYPE_FC, p0, p0, p0, "FC_a_0b", sys_id, NULL); (*tp)[1] = lsm_target_port_record_alloc("TGT_PORT_ID_02", LSM_TARGET_PORT_TYPE_FCOE, p1, p1, p1, "FC_a_0c", sys_id, NULL); (*tp)[2] = lsm_target_port_record_alloc("TGT_PORT_ID_03", LSM_TARGET_PORT_TYPE_ISCSI, "iqn.1986-05.com.example:sim-tgt-03", "sim-iscsi-tgt-3.example.com:3260", "a4:4e:31:47:f4:e0", "iSCSI_c_0d", sys_id, NULL); (*tp)[3] = lsm_target_port_record_alloc("TGT_PORT_ID_04", LSM_TARGET_PORT_TYPE_ISCSI, "iqn.1986-05.com.example:sim-tgt-03", "10.0.0.1:3260", "a4:4e:31:47:f4:e1", "iSCSI_c_0e", sys_id, NULL); (*tp)[4] = lsm_target_port_record_alloc("TGT_PORT_ID_05", LSM_TARGET_PORT_TYPE_ISCSI, "iqn.1986-05.com.example:sim-tgt-03", "[2001:470:1f09:efe:a64e:31ff::1]:3260", "a4:4e:31:47:f4:e1", "iSCSI_c_0e", sys_id, NULL); for (i = 0; i < *count; ++i) { if (!(*tp)[i]) { rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "ENOMEM"); lsm_target_port_record_array_free(*tp, *count); *count = 0; break; } } } else { rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "ENOMEM"); *count = 0; } if (LSM_ERR_OK == rc) { lsm_plug_target_port_search_filter(search_key, search_value, *tp, count); } return rc; } static uint64_t pool_allocate(lsm_pool * p, uint64_t size) { uint64_t rounded_size = 0; uint64_t free_space = lsm_pool_free_space_get(p); rounded_size = (size / BS) * BS; if (free_space >= rounded_size) { free_space -= rounded_size; lsm_pool_free_space_set(p, free_space); } else { rounded_size = 0; } return rounded_size; } void pool_deallocate(lsm_pool * p, uint64_t size) { uint64_t free_space = lsm_pool_free_space_get(p); free_space += size; lsm_pool_free_space_set(p, free_space); } static lsm_pool *find_pool(struct plugin_data *pd, const char *pool_id) { return (lsm_pool *) g_hash_table_lookup(pd->pools, pool_id); } static struct allocated_volume *find_volume(struct plugin_data *pd, const char *vol_id) { struct allocated_volume *rc = 
g_hash_table_lookup(pd->volumes, vol_id); return rc; } static struct allocated_volume *find_volume_name(struct plugin_data *pd, const char *name) { struct allocated_volume *found = NULL; char *k = NULL; struct allocated_volume *vol; GHashTableIter iter; g_hash_table_iter_init(&iter, pd->volumes); while (g_hash_table_iter_next(&iter, (gpointer) & k, (gpointer) & vol)) { if (strcmp(lsm_volume_name_get(vol->v), name) == 0) { found = vol; break; } } return found; } static int volume_create(lsm_plugin_ptr c, lsm_pool * pool, const char *volume_name, uint64_t size, lsm_volume_provision_type provisioning, lsm_volume ** new_volume, char **job, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); lsm_pool *p = find_pool(pd, lsm_pool_id_get(pool)); if (p) { if (!find_volume_name(pd, volume_name)) { uint64_t allocated_size = pool_allocate(p, size); if (allocated_size) { char *id = md5(volume_name); /* We create one to return and a copy to store in memory */ lsm_volume *v = lsm_volume_record_alloc(id, volume_name, "60a980003246694a412b45673342616e", BS, allocated_size/BS, 0, sys_id, lsm_pool_id_get(pool), NULL); lsm_volume *to_store = lsm_volume_record_copy(v); struct allocated_volume *av = malloc(sizeof(struct allocated_volume)); if (v && av && to_store) { av->v = to_store; av->p = p; /* * Make a copy of the key, as we may replace the volume, * but leave the key. 
*/ g_hash_table_insert(pd->volumes, (gpointer) strdup(lsm_volume_id_get(to_store)), (gpointer) av); rc = create_job(pd, job, LSM_DATA_TYPE_VOLUME, v, (void **) new_volume); } else { free(av); lsm_volume_record_free(v); lsm_volume_record_free(to_store); rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "Check for leaks"); } } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_ENOUGH_SPACE, "Insufficient space in pool"); } } else { rc = lsm_log_error_basic(c, LSM_ERR_NAME_CONFLICT, "Existing volume " "with name"); } } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_POOL, "Pool not found!"); } return rc; } static int volume_replicate(lsm_plugin_ptr c, lsm_pool * pool, lsm_replication_type rep_type, lsm_volume * volume_src, const char *name, lsm_volume ** new_replicant, char **job, lsm_flag flags) { int rc = LSM_ERR_OK; lsm_pool *pool_to_use = NULL; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); if (pool) { pool_to_use = find_pool(pd, lsm_pool_id_get(pool)); } else { pool_to_use = find_pool(pd, lsm_volume_pool_id_get(volume_src)); } if (!pool_to_use) { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_POOL, "Pool not found!"); } else { if (find_volume(pd, lsm_volume_id_get(volume_src))) { rc = volume_create(c, pool_to_use, name, lsm_volume_number_of_blocks_get(volume_src) * BS, LSM_VOLUME_PROVISION_DEFAULT, new_replicant, job, flags); } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_VOLUME, "Volume not found!"); } } return rc; } static int volume_replicate_range_bs(lsm_plugin_ptr c, lsm_system * system, uint32_t * bs, lsm_flag flags) { *bs = BS; return LSM_ERR_OK; } static int volume_replicate_range(lsm_plugin_ptr c, lsm_replication_type rep_type, lsm_volume * source, lsm_volume * dest, lsm_block_range ** ranges, uint32_t num_ranges, char **job, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_volume *src_v = find_volume(pd, lsm_volume_id_get(source)); struct 
/*
 * Resize a volume.
 *
 * Accounting works by returning the volume's current space to the pool,
 * then attempting to allocate the new size; on any failure the original
 * allocation is re-applied so pool accounting stays balanced.  On success
 * the stored record is replaced and a copy is handed to an async job.
 */
static int volume_resize(lsm_plugin_ptr c, lsm_volume * volume,
                         uint64_t new_size, lsm_volume ** resized_volume,
                         char **job, lsm_flag flags)
{
    int rc = LSM_ERR_OK;
    struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c);
    struct allocated_volume *av = find_volume(pd, lsm_volume_id_get(volume));
    if (av) {
        lsm_volume *v = av->v;
        lsm_pool *p = av->p;
        uint64_t curr_size = lsm_volume_number_of_blocks_get(v) * BS;

        /* Give the current allocation back before trying the new one. */
        pool_deallocate(p, curr_size);
        uint64_t resized_size = pool_allocate(p, new_size);
        if (resized_size) {
            /* Same identity (id/name/vpd83), new block count. */
            lsm_volume *vp =
                lsm_volume_record_alloc(lsm_volume_id_get(v),
                                        lsm_volume_name_get(v),
                                        lsm_volume_vpd83_get(v),
                                        lsm_volume_block_size_get(v),
                                        resized_size/BS, 0, sys_id,
                                        lsm_volume_pool_id_get(volume), NULL);
            if( vp ) {
                av->v = vp;                   /* swap in the resized record */
                lsm_volume_record_free(v);
                rc = create_job(pd, job, LSM_DATA_TYPE_VOLUME,
                                lsm_volume_record_copy(vp),
                                (void **) resized_volume);
            } else {
                /* Record alloc failed: undo the new allocation, restore old. */
                pool_deallocate(p, resized_size);
                pool_allocate(p, curr_size);
                rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "ENOMEM");
            }
        } else {
            /* Could not accommodate the re-size; restore the old allocation. */
            pool_allocate(p, curr_size);
            rc = lsm_log_error_basic(c, LSM_ERR_NOT_ENOUGH_SPACE,
                                     "Insufficient space in pool");
        }
    } else {
        rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_VOLUME,
                                 "volume not found!");
    }
    return rc;
}
g_hash_table_iter_init(&iter, pd->group_grant); while (g_hash_table_iter_next (&iter, (gpointer) & k, (gpointer) & v)) { if (g_hash_table_lookup(v, volume_id)) { g_hash_table_remove(v, volume_id); } } } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_VOLUME, "volume not found!"); } return rc; } static int volume_delete(lsm_plugin_ptr c, lsm_volume * volume, char **job, lsm_flag flags) { lsm_access_group **groups = NULL; uint32_t count = 0; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); // Check to see if this volume is masked to any access groups, if it is we // will return an IS_MASKED error code. int rc = ag_granted_to_volume(c, volume, &groups, &count, LSM_CLIENT_FLAG_RSVD); if (LSM_ERR_OK == rc) { lsm_access_group_record_array_free(groups, count); groups = NULL; if (!count) { rc = _volume_delete(c, lsm_volume_id_get(volume)); if (LSM_ERR_OK == rc) { rc = create_job(pd, job, LSM_DATA_TYPE_NONE, NULL, NULL); } } else { rc = lsm_log_error_basic(c, LSM_ERR_IS_MASKED, "Volume is masked!"); } } return rc; } static int volume_raid_info(lsm_plugin_ptr c, lsm_volume * volume, lsm_volume_raid_type * raid_type, uint32_t * strip_size, uint32_t * disk_count, uint32_t * min_io_size, uint32_t * opt_io_size, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_volume *av = find_volume(pd, lsm_volume_id_get(volume)); if (!av) { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_VOLUME, "volume not found!"); } *raid_type = LSM_VOLUME_RAID_TYPE_UNKNOWN; *strip_size = LSM_VOLUME_STRIP_SIZE_UNKNOWN; *disk_count = LSM_VOLUME_DISK_COUNT_UNKNOWN; *min_io_size = LSM_VOLUME_MIN_IO_SIZE_UNKNOWN; *opt_io_size = LSM_VOLUME_OPT_IO_SIZE_UNKNOWN; return rc; } static int pool_member_info(lsm_plugin_ptr c, lsm_pool * pool, lsm_volume_raid_type * raid_type, lsm_pool_member_type * member_type, lsm_string_list ** member_ids, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct 
/* RAID-create capability query: not supported by this simulator. */
static int volume_raid_create_cap_get(lsm_plugin_ptr c, lsm_system * system,
                                      uint32_t ** supported_raid_types,
                                      uint32_t * supported_raid_type_count,
                                      uint32_t ** supported_strip_sizes,
                                      uint32_t * supported_strip_size_count,
                                      lsm_flag flags)
{
    return LSM_ERR_NO_SUPPORT;
}

/* Creating a RAID volume from raw disks: not supported by this simulator. */
static int volume_raid_create(lsm_plugin_ptr c, const char *name,
                              lsm_volume_raid_type raid_type,
                              lsm_disk * disks[], uint32_t disk_count,
                              uint32_t strip_size, lsm_volume ** new_volume,
                              lsm_flag flags)
{
    return LSM_ERR_NO_SUPPORT;
}

/* v1.2 operations dispatch table registered with the plugin core. */
static struct lsm_ops_v1_2 ops_v1_2 = {
    volume_raid_info,
    pool_member_info,
    volume_raid_create_cap_get,
    volume_raid_create,
};

/*
 * Enable/disable a volume.  The simulator keeps no enabled/disabled
 * state; it only validates that the volume exists (this one handler is
 * registered for both operations).
 */
static int volume_enable_disable(lsm_plugin_ptr c, lsm_volume * v,
                                 lsm_flag flags)
{
    int rc = LSM_ERR_OK;
    struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c);
    struct allocated_volume *av = find_volume(pd, lsm_volume_id_get(v));
    if (!av) {
        rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_VOLUME,
                                 "volume not found!");
    }
    return rc;
}
lsm_access_group_record_array_free(*groups, i); *group_count = 0; groups = NULL; break; } ++i; } } else { rc = LSM_ERR_NO_MEMORY; } } if (LSM_ERR_OK == rc) { lsm_plug_access_group_search_filter(search_key, search_value, *groups, group_count); } return rc; } static int _find_dup_init(struct plugin_data *pd, const char *initiator_id) { GList *all_aags = g_hash_table_get_values(pd->access_groups); guint y; int rc = 1; for (y = 0; y < g_list_length(all_aags); ++y) { struct allocated_ag *cur_aag = (struct allocated_ag *) g_list_nth_data(all_aags, y); if (cur_aag) { lsm_string_list *inits = lsm_access_group_initiator_id_get(cur_aag->ag); int i; for (i = 0; i < lsm_string_list_size(inits); ++i) { const char *cur_init_id = lsm_string_list_elem_get(inits, i); if (strcmp(initiator_id, cur_init_id) == 0) { rc = 0; break; } } if (rc == 0) { break; } else { cur_aag = (struct allocated_ag *) g_list_next(all_aags); } } } g_list_free(all_aags); return rc; } static int access_group_create(lsm_plugin_ptr c, const char *name, const char *initiator_id, lsm_access_group_init_type id_type, lsm_system * system, lsm_access_group ** access_group, lsm_flag flags) { int rc = LSM_ERR_OK; lsm_access_group *ag = NULL; struct allocated_ag *aag = NULL; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); char *id = strdup(md5(name)); struct allocated_ag *find = (struct allocated_ag *) g_hash_table_lookup(pd->access_groups, id); if (!find) { // check initiator conflict if (_find_dup_init(pd, initiator_id) == 0) { rc = lsm_log_error_basic(c, LSM_ERR_EXISTS_INITIATOR, "Requested initiator is used by other access group"); } else { lsm_string_list *initiators = lsm_string_list_alloc(1); if (initiators && id && (LSM_ERR_OK == lsm_string_list_elem_set(initiators, 0, initiator_id))) { ag = lsm_access_group_record_alloc(id, name, initiators, id_type, lsm_system_id_get (system), NULL); aag = alloc_allocated_ag(ag, id_type); if (ag && aag) { g_hash_table_insert(pd->access_groups, 
(gpointer) id, (gpointer) aag); *access_group = lsm_access_group_record_copy(ag); } else { free_allocated_ag(aag); lsm_access_group_record_free(ag); rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "ENOMEM"); } } else { rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "ENOMEM"); } /* Initiators is copied when allocating a group record */ lsm_string_list_free(initiators); } } else { rc = lsm_log_error_basic(c, LSM_ERR_NAME_CONFLICT, "access group with same id found"); } /* * If we were not successful free memory for id string, id is on the heap * because it is passed to the hash table. */ if (LSM_ERR_OK != rc) { free(id); } return rc; } static int access_group_delete(lsm_plugin_ptr c, lsm_access_group * group, lsm_flag flags) { int rc = LSM_ERR_OK; lsm_volume **volumes = NULL; uint32_t count = 0; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); const char *id = lsm_access_group_id_get(group); rc = vol_accessible_by_ag(c, group, &volumes, &count, LSM_CLIENT_FLAG_RSVD); lsm_volume_record_array_free(volumes, count); volumes = NULL; if (rc == LSM_ERR_OK) { if (count) { rc = lsm_log_error_basic(c, LSM_ERR_IS_MASKED, "access group has masked volumes!"); } else { gboolean r = g_hash_table_remove(pd->access_groups, (gpointer) id); if (!r) { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_ACCESS_GROUP, "access group not found"); } else { g_hash_table_remove(pd->group_grant, id); } if (!g_hash_table_size(pd->access_groups)) { assert(g_hash_table_size(pd->group_grant) == 0); } } } return rc; } static int access_group_initiator_add(lsm_plugin_ptr c, lsm_access_group * group, const char *initiator_id, lsm_access_group_init_type id_type, lsm_access_group ** updated_access_group, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_ag *find = (struct allocated_ag *) g_hash_table_lookup(pd->access_groups, lsm_access_group_id_get(group)); if (find) { lsm_string_list *inits = 
lsm_access_group_initiator_id_get(find->ag); rc = lsm_string_list_append(inits, initiator_id); if (LSM_ERR_OK == rc) { *updated_access_group = lsm_access_group_record_copy(find->ag); if (!*updated_access_group) { rc = LSM_ERR_NO_MEMORY; } } } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_ACCESS_GROUP, "access group not found"); } return rc; } static int access_group_initiator_delete(lsm_plugin_ptr c, lsm_access_group * group, const char *initiator_id, lsm_access_group_init_type id_type, lsm_access_group ** updated_access_group, lsm_flag flags) { int rc = LSM_ERR_INVALID_ARGUMENT; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_ag *find = (struct allocated_ag *) g_hash_table_lookup(pd->access_groups, lsm_access_group_id_get(group)); if (find) { uint32_t i; lsm_string_list *inits = lsm_access_group_initiator_id_get(find->ag); for (i = 0; i < lsm_string_list_size(inits); ++i) { if (strcmp(initiator_id, lsm_string_list_elem_get(inits, i)) == 0) { lsm_string_list_delete(inits, i); rc = LSM_ERR_OK; break; } } if (LSM_ERR_OK == rc) { *updated_access_group = lsm_access_group_record_copy(find->ag); if (!*updated_access_group) { rc = LSM_ERR_NO_MEMORY; } } } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_ACCESS_GROUP, "access group not found"); } return rc; } static int volume_mask(lsm_plugin_ptr c, lsm_access_group * group, lsm_volume * volume, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_ag *find = (struct allocated_ag *) g_hash_table_lookup(pd->access_groups, lsm_access_group_id_get(group)); struct allocated_volume *av = find_volume(pd, lsm_volume_id_get(volume)); if (find && av) { GHashTable *grants = g_hash_table_lookup(pd->group_grant, lsm_access_group_id_get (find->ag)); if (!grants) { /* We don't have any mappings for this access group */ GHashTable *grant = g_hash_table_new_full(g_str_hash, g_str_equal, free, free); char *key = 
strdup(lsm_access_group_id_get(find->ag)); char *vol_id = strdup(lsm_volume_id_get(volume)); int *val = (int *) malloc(sizeof(int)); if (grant && key && val && vol_id) { *val = 1; /* Create the association for volume id and access value */ g_hash_table_insert(grant, vol_id, val); /* Create the association for access groups */ g_hash_table_insert(pd->group_grant, key, grant); } else { rc = LSM_ERR_NO_MEMORY; free(key); free(val); free(vol_id); if (grant) { g_hash_table_destroy(grant); grant = NULL; } } } else { /* See if we have this volume in the access grants */ char *vol_id = g_hash_table_lookup(grants, lsm_volume_id_get(volume)); if (!vol_id) { vol_id = strdup(lsm_volume_id_get(volume)); int *val = (int *) malloc(sizeof(int)); if (vol_id && val) { *val = 1; g_hash_table_insert(grants, vol_id, val); } else { rc = LSM_ERR_NO_MEMORY; free(vol_id); free(val); } } else { rc = LSM_ERR_NO_STATE_CHANGE; } } } else { if (!av) { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_VOLUME, "volume not found"); } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_ACCESS_GROUP, "access group not found"); } } return rc; } static int volume_unmask(lsm_plugin_ptr c, lsm_access_group * group, lsm_volume * volume, lsm_flag flags) { int rc = LSM_ERR_NO_STATE_CHANGE; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_ag *find = (struct allocated_ag *) g_hash_table_lookup(pd->access_groups, lsm_access_group_id_get(group)); struct allocated_volume *av = find_volume(pd, lsm_volume_id_get(volume)); if (find && av) { GHashTable *grants = g_hash_table_lookup(pd->group_grant, lsm_access_group_id_get (find->ag)); if (grants) { char *vol_id = g_hash_table_lookup(grants, lsm_volume_id_get(volume)); if (vol_id) { g_hash_table_remove(grants, lsm_volume_id_get(volume)); rc = LSM_ERR_OK; } else { rc = LSM_ERR_NO_STATE_CHANGE; } } } else { if (!av) { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_VOLUME, "volume not found"); } else { rc = lsm_log_error_basic(c, 
LSM_ERR_NOT_FOUND_ACCESS_GROUP, "access group not found"); } } return rc; } static lsm_volume *get_volume_by_id(struct plugin_data *pd, const char *id) { struct allocated_volume *av = find_volume(pd, id); if (av) { return av->v; } return NULL; } static int vol_accessible_by_ag(lsm_plugin_ptr c, lsm_access_group * group, lsm_volume ** volumes[], uint32_t * count, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_ag *find = (struct allocated_ag *) g_hash_table_lookup(pd->access_groups, lsm_access_group_id_get(group)); if (find) { GHashTable *grants = g_hash_table_lookup(pd->group_grant, lsm_access_group_id_get (find->ag)); *count = 0; if (grants && g_hash_table_size(grants)) { *count = g_hash_table_size(grants); GList *keys = g_hash_table_get_keys(grants); *volumes = lsm_volume_record_array_alloc(*count); if (keys && *volumes) { GList *curr = NULL; int i = 0; for (curr = g_list_first(keys); curr != NULL; curr = g_list_next(curr), ++i) { (*volumes)[i] = lsm_volume_record_copy(get_volume_by_id (pd, (char *) curr->data)); if (!(*volumes)[i]) { rc = LSM_ERR_NO_MEMORY; lsm_volume_record_array_free(*volumes, i); *volumes = NULL; *count = 0; break; } } /* Free the keys */ g_list_free(keys); } else { rc = LSM_ERR_NO_MEMORY; } } } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_ACCESS_GROUP, "access group not found"); } return rc; } static lsm_access_group *access_group_by_id(struct plugin_data *pd, const char *key) { struct allocated_ag *find = g_hash_table_lookup(pd->access_groups, key); if (find) { return find->ag; } return NULL; } static int ag_granted_to_volume(lsm_plugin_ptr c, lsm_volume * volume, lsm_access_group ** groups[], uint32_t * count, lsm_flag flags) { int rc = LSM_ERR_OK; GHashTableIter iter; char *k = NULL; GHashTable *v = NULL; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); const char *volume_id = lsm_volume_id_get(volume); 
g_hash_table_iter_init(&iter, pd->group_grant); GSList *result = NULL; *count = 0; while (g_hash_table_iter_next(&iter, (gpointer) & k, (gpointer) & v)) { if (g_hash_table_lookup(v, volume_id)) { *count += 1; result = g_slist_prepend(result, access_group_by_id(pd, k)); } } if (*count) { int i = 0; *groups = lsm_access_group_record_array_alloc(*count); GSList *siter = NULL; if (*groups) { for (siter = result; siter; siter = g_slist_next(siter), i++) { (*groups)[i] = lsm_access_group_record_copy((lsm_access_group *) siter->data); if (!(*groups)[i]) { rc = LSM_ERR_NO_MEMORY; lsm_access_group_record_array_free(*groups, i); *groups = NULL; *count = 0; break; } } } else { rc = LSM_ERR_NO_MEMORY; } } if (result) { g_slist_free(result); } return rc; } int static volume_dependency(lsm_plugin_ptr c, lsm_volume * volume, uint8_t * yes, lsm_flag flags) { struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_volume *av = find_volume(pd, lsm_volume_id_get(volume)); if (av) { *yes = 0; return LSM_ERR_OK; } else { return LSM_ERR_NOT_FOUND_VOLUME; } } int static volume_dependency_rm(lsm_plugin_ptr c, lsm_volume * volume, char **job, lsm_flag flags) { struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_volume *av = find_volume(pd, lsm_volume_id_get(volume)); if (av) { return create_job(pd, job, LSM_DATA_TYPE_NONE, NULL, NULL); } else { return LSM_ERR_NOT_FOUND_VOLUME; } } static int iscsi_chap_auth(lsm_plugin_ptr c, const char *init_id, const char *in_user, const char *in_password, const char *out_user, const char *out_password, lsm_flag flags) { if (init_id) { return 0; } return LSM_ERR_INVALID_ARGUMENT; } static struct lsm_san_ops_v1 san_ops = { list_volumes, list_disks, volume_create, volume_replicate, volume_replicate_range_bs, volume_replicate_range, volume_resize, volume_delete, volume_enable_disable, volume_enable_disable, iscsi_chap_auth, access_group_list, access_group_create, access_group_delete, 
access_group_initiator_add, access_group_initiator_delete, volume_mask, volume_unmask, vol_accessible_by_ag, ag_granted_to_volume, volume_dependency, volume_dependency_rm, list_targets }; static int fs_list(lsm_plugin_ptr c, const char *search_key, const char *search_value, lsm_fs ** fs[], uint32_t * count, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); *count = g_hash_table_size(pd->fs); if (*count) { *fs = lsm_fs_record_array_alloc(*count); if (*fs) { uint32_t i = 0; char *k = NULL; struct allocated_fs *afs = NULL; GHashTableIter iter; g_hash_table_iter_init(&iter, pd->fs); while (g_hash_table_iter_next (&iter, (gpointer) & k, (gpointer) & afs)) { (*fs)[i] = lsm_fs_record_copy(afs->fs); if (!(*fs)[i]) { rc = LSM_ERR_NO_MEMORY; lsm_fs_record_array_free(*fs, i); *count = 0; *fs = NULL; break; } ++i; } } else { rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "ENOMEM"); } } if (LSM_ERR_OK == rc) { lsm_plug_fs_search_filter(search_key, search_value, *fs, count); } return rc; } static int fs_create(lsm_plugin_ptr c, lsm_pool * pool, const char *name, uint64_t size_bytes, lsm_fs ** fs, char **job, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); lsm_pool *p = find_pool(pd, lsm_pool_id_get(pool)); if (p && !g_hash_table_lookup(pd->fs, md5(name))) { uint64_t allocated_size = pool_allocate(p, size_bytes); if (allocated_size) { char *id = md5(name); char *key = strdup(id); lsm_fs *new_fs = NULL; /* Make a copy to store and a copy to hand back to caller */ lsm_fs *tfs = lsm_fs_record_alloc(id, name, allocated_size, allocated_size, lsm_pool_id_get(pool), sys_id, NULL); new_fs = lsm_fs_record_copy(tfs); /* Allocate the memory to keep the associations */ struct allocated_fs *afs = alloc_fs_record(); if (key && tfs && afs) { afs->fs = tfs; afs->p = p; g_hash_table_insert(pd->fs, key, afs); rc = create_job(pd, job, LSM_DATA_TYPE_FS, new_fs, (void **) fs); 
} else { free(key); lsm_fs_record_free(new_fs); lsm_fs_record_free(tfs); free_fs_record(afs); *fs = NULL; rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "ENOMEM"); } } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_ENOUGH_SPACE, "Insufficient space in pool"); } } else { if (p == NULL) { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_POOL, "Pool not found!"); } else { rc = lsm_log_error_basic(c, LSM_ERR_NAME_CONFLICT, "File system with name exists"); } } return rc; } static int fs_delete(lsm_plugin_ptr c, lsm_fs * fs, char **job, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); if (!g_hash_table_remove(pd->fs, lsm_fs_id_get(fs))) { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS, "FS not found!"); } else { rc = create_job(pd, job, LSM_DATA_TYPE_NONE, NULL, NULL); } return rc; } static int fs_resize(lsm_plugin_ptr c, lsm_fs * fs, uint64_t new_size_bytes, lsm_fs * *rfs, char **job, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_fs *afs = g_hash_table_lookup(pd->fs, lsm_fs_id_get(fs)); *rfs = NULL; *job = NULL; if (afs) { lsm_pool *p = afs->p; lsm_fs *tfs = afs->fs; pool_deallocate(p, lsm_fs_total_space_get(tfs)); uint64_t resized_size = pool_allocate(p, new_size_bytes); if (resized_size) { lsm_fs *resized = lsm_fs_record_alloc(lsm_fs_id_get(tfs), lsm_fs_name_get(tfs), new_size_bytes, new_size_bytes, lsm_fs_pool_id_get(tfs), lsm_fs_system_id_get(tfs), NULL); lsm_fs *returned_copy = lsm_fs_record_copy(resized); if (resized && returned_copy) { lsm_fs_record_free(tfs); afs->fs = resized; rc = create_job(pd, job, LSM_DATA_TYPE_FS, returned_copy, (void **) rfs); } else { lsm_fs_record_free(resized); lsm_fs_record_free(returned_copy); *rfs = NULL; pool_deallocate(p, new_size_bytes); pool_allocate(p, lsm_fs_total_space_get(tfs)); rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "ENOMEM"); } } else { /*Could not accommodate re-sized, go 
back */ pool_allocate(p, lsm_fs_total_space_get(tfs)); rc = lsm_log_error_basic(c, LSM_ERR_NOT_ENOUGH_SPACE, "Insufficient space in pool"); } } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS, "file system not found!"); } return rc; } static int fs_clone(lsm_plugin_ptr c, lsm_fs * src_fs, const char *dest_fs_name, lsm_fs ** cloned_fs, lsm_fs_ss * optional_snapshot, char **job, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_fs *find = g_hash_table_lookup(pd->fs, lsm_fs_id_get(src_fs)); if (find) { rc = fs_create(c, find->p, dest_fs_name, lsm_fs_total_space_get(find->fs), cloned_fs, job, flags); } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS, "Source fs not found"); } return rc; } static int fs_file_clone(lsm_plugin_ptr c, lsm_fs * fs, const char *src_file_name, const char *dest_file_name, lsm_fs_ss * snapshot, char **job, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_fs *find = (struct allocated_fs *) g_hash_table_lookup(pd->fs, lsm_fs_id_get(fs)); if (!find) { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS, "fs not found"); } else { rc = create_job(pd, job, LSM_DATA_TYPE_NONE, NULL, NULL); } return rc; } static int fs_child_dependency(lsm_plugin_ptr c, lsm_fs * fs, lsm_string_list * files, uint8_t * yes) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); if (g_hash_table_lookup(pd->fs, lsm_fs_id_get(fs))) { *yes = 0; } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS, "fs not found"); } return rc; } static int fs_child_dependency_rm(lsm_plugin_ptr c, lsm_fs * fs, lsm_string_list * files, char **job, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); if (!g_hash_table_lookup(pd->fs, lsm_fs_id_get(fs))) { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS, "fs not found"); } 
else { rc = create_job(pd, job, LSM_DATA_TYPE_NONE, NULL, NULL); } return rc; } static int ss_list(lsm_plugin_ptr c, lsm_fs * fs, lsm_fs_ss ** ss[], uint32_t * count, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_fs *find = (struct allocated_fs *) g_hash_table_lookup(pd->fs, lsm_fs_id_get(fs)); if (find) { char *k = NULL; lsm_fs_ss *v = NULL; GHashTableIter iter; *ss = NULL; *count = g_hash_table_size(find->ss); if (*count) { *ss = lsm_fs_ss_record_array_alloc(*count); if (*ss) { int i = 0; g_hash_table_iter_init(&iter, find->ss); while (g_hash_table_iter_next(&iter, (gpointer) & k, (gpointer) & v)) { (*ss)[i] = lsm_fs_ss_record_copy(v); if (!(*ss)[i]) { rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "ENOMEM"); lsm_fs_ss_record_array_free(*ss, i); *ss = NULL; *count = 0; break; } ++i; } } else { rc = lsm_log_error_basic(c, LSM_ERR_NO_MEMORY, "ENOMEM"); *count = 0; } } } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS, "fs not found"); } return rc; } static int ss_create(lsm_plugin_ptr c, lsm_fs * fs, const char *name, lsm_fs_ss ** snapshot, char **job, lsm_flag flags) { int rc = LSM_ERR_NO_MEMORY; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_fs *find = (struct allocated_fs *) g_hash_table_lookup(pd->fs, lsm_fs_id_get(fs)); if (find) { if (!g_hash_table_lookup(find->ss, md5(name))) { char *id = strdup(md5(name)); if (id) { lsm_fs_ss *ss = lsm_fs_ss_record_alloc(id, name, time(NULL), NULL); lsm_fs_ss *new_shot = lsm_fs_ss_record_copy(ss); if (ss && new_shot) { g_hash_table_insert(find->ss, (gpointer) id, (gpointer) ss); rc = create_job(pd, job, LSM_DATA_TYPE_SS, new_shot, (void **) snapshot); } else { lsm_fs_ss_record_free(ss); ss = NULL; lsm_fs_ss_record_free(new_shot); *snapshot = NULL; free(id); id = NULL; } } } else { rc = lsm_log_error_basic(c, LSM_ERR_NAME_CONFLICT, "snapshot name exists"); } } else { rc = 
lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS, "fs not found"); } return rc; } static int ss_delete(lsm_plugin_ptr c, lsm_fs * fs, lsm_fs_ss * ss, char **job, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_fs *find = (struct allocated_fs *) g_hash_table_lookup(pd->fs, lsm_fs_id_get(fs)); if (find) { if (!g_hash_table_remove(find->ss, lsm_fs_ss_id_get(ss))) { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS_SS, "snapshot not found"); } else { rc = create_job(pd, job, LSM_DATA_TYPE_NONE, NULL, NULL); } } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS, "fs not found"); } return rc; } static int ss_restore(lsm_plugin_ptr c, lsm_fs * fs, lsm_fs_ss * ss, lsm_string_list * files, lsm_string_list * restore_files, int all_files, char **job, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_fs *find = (struct allocated_fs *) g_hash_table_lookup(pd->fs, lsm_fs_id_get(fs)); if (find) { if (!g_hash_table_lookup(find->ss, lsm_fs_ss_id_get(ss))) { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS_SS, "snapshot not found"); } else { rc = create_job(pd, job, LSM_DATA_TYPE_NONE, NULL, NULL); } } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS, "fs not found"); } return rc; } static struct lsm_fs_ops_v1 fs_ops = { fs_list, fs_create, fs_delete, fs_resize, fs_clone, fs_file_clone, fs_child_dependency, fs_child_dependency_rm, ss_list, ss_create, ss_delete, ss_restore }; static int nfs_auth_types(lsm_plugin_ptr c, lsm_string_list ** types, lsm_flag flags) { int rc = LSM_ERR_OK; *types = lsm_string_list_alloc(1); if (*types) { rc = lsm_string_list_elem_set(*types, 0, "standard"); } else { rc = LSM_ERR_NO_MEMORY; } return rc; } static int nfs_export_list(lsm_plugin_ptr c, const char *search_key, const char *search_value, lsm_nfs_export ** exports[], uint32_t * count, lsm_flag flags) { int rc = LSM_ERR_OK; 
GHashTableIter fs_iter; GHashTableIter exports_iter; char *k = NULL; struct allocated_fs *v = NULL; GSList *result = NULL; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); g_hash_table_iter_init(&fs_iter, pd->fs); *count = 0; /* Walk through each of the file systems and their associated exports */ while (g_hash_table_iter_next(&fs_iter, (gpointer) & k, (gpointer) & v)) { char *exp_key = NULL; lsm_nfs_export **exp_val = NULL; g_hash_table_iter_init(&exports_iter, v->exports); while (g_hash_table_iter_next(&exports_iter, (gpointer) & exp_key, (gpointer) & exp_val)) { result = g_slist_prepend(result, exp_val); *count += 1; } } if (*count) { int i = 0; GSList *s_iter = NULL; *exports = lsm_nfs_export_record_array_alloc(*count); if (*exports) { for (s_iter = result; s_iter; s_iter = g_slist_next(s_iter), i++) { (*exports)[i] = lsm_nfs_export_record_copy((lsm_nfs_export *) s_iter->data); if (!(*exports)[i]) { rc = LSM_ERR_NO_MEMORY; lsm_nfs_export_record_array_free(*exports, i); *exports = NULL; *count = 0; break; } } } else { rc = LSM_ERR_NO_MEMORY; } } if (result) { g_slist_free(result); result = NULL; } if (LSM_ERR_OK == rc) { lsm_plug_nfs_export_search_filter(search_key, search_value, *exports, count); } return rc; } static int nfs_export_create(lsm_plugin_ptr c, const char *fs_id, const char *export_path, lsm_string_list *root_list, lsm_string_list *rw_list, lsm_string_list *ro_list, uint64_t anon_uid, uint64_t anon_gid, const char *auth_type, const char *options, lsm_nfs_export **exported, lsm_flag flags) { int rc = LSM_ERR_OK; char auto_export[2048]; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_fs *fs = g_hash_table_lookup(pd->fs, fs_id); if (fs) { if (!export_path) { snprintf(auto_export, sizeof(auto_export), "/mnt/lsm/nfs/%s", lsm_fs_name_get(fs->fs)); export_path = auto_export; } char *key = strdup(md5(export_path)); *exported = lsm_nfs_export_record_alloc(md5(export_path), fs_id, export_path, 
auth_type, root_list, rw_list, ro_list, anon_uid, anon_gid, options, NULL); lsm_nfs_export *value = lsm_nfs_export_record_copy(*exported); if (key && *exported && value) { g_hash_table_insert(fs->exports, key, value); } else { rc = LSM_ERR_NO_MEMORY; free(key); lsm_nfs_export_record_free(*exported); lsm_nfs_export_record_free(value); } } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS, "fs not found"); } return rc; } static int nfs_export_remove(lsm_plugin_ptr c, lsm_nfs_export * e, lsm_flag flags) { int rc = LSM_ERR_OK; struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); struct allocated_fs *fs = g_hash_table_lookup(pd->fs, lsm_nfs_export_fs_id_get (e)); if (fs) { if (!g_hash_table_remove(fs->exports, lsm_nfs_export_id_get(e))) { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_NFS_EXPORT, "export not found"); } } else { rc = lsm_log_error_basic(c, LSM_ERR_NOT_FOUND_FS, "fs not found"); } return rc; } static struct lsm_nas_ops_v1 nfs_ops = { nfs_auth_types, nfs_export_list, nfs_export_create, nfs_export_remove }; void free_group_grant_hash(void *v) { g_hash_table_destroy((GHashTable *) v); } void free_allocated_fs(void *v) { free_fs_record((struct allocated_fs *) v); } void free_disk(void *d) { lsm_disk_record_free((lsm_disk *) d); } void free_allocated_volume(void *v) { if (v) { struct allocated_volume *av = (struct allocated_volume *) v; lsm_volume_record_free(av->v); av->v = NULL; av->p = NULL; /* Pool takes care of itself */ free(av); } } static void _unload(struct plugin_data *pd) { int i; if (pd) { if (pd->disks) { g_hash_table_destroy(pd->disks); pd->disks = NULL; } if (pd->jobs) { g_hash_table_destroy(pd->jobs); pd->jobs = NULL; } if (pd->fs) { g_hash_table_destroy(pd->fs); pd->fs = NULL; } if (pd->group_grant) { g_hash_table_destroy(pd->group_grant); pd->group_grant = NULL; } if (pd->access_groups) { g_hash_table_destroy(pd->access_groups); pd->access_groups = NULL; } if (pd->volumes) { g_hash_table_destroy(pd->volumes); 
pd->volumes = NULL; } if (pd->pools) { g_hash_table_destroy(pd->pools); pd->pools = NULL; } for (i = 0; i < pd->num_systems; ++i) { lsm_system_record_free(pd->system[i]); pd->system[i] = NULL; } pd->num_systems = 0; free(pd); pd = NULL; } } int load(lsm_plugin_ptr c, const char *uri, const char *password, uint32_t timeout, lsm_flag flags) { struct plugin_data *pd = (struct plugin_data *) calloc(1, sizeof(struct plugin_data)); int rc = LSM_ERR_NO_MEMORY; int i; lsm_pool *p = NULL; if (pd) { pd->num_systems = 1; pd->system[0] = lsm_system_record_alloc(sys_id, "LSM simulated storage plug-in", LSM_SYSTEM_STATUS_OK, "", NULL); p = lsm_pool_record_alloc("POOL_3", "lsm_test_aggr", LSM_POOL_ELEMENT_TYPE_FS | LSM_POOL_ELEMENT_TYPE_VOLUME, 0, UINT64_MAX, UINT64_MAX, LSM_POOL_STATUS_OK, "", sys_id, 0); if (p) { pd->pools = g_hash_table_new_full(g_str_hash, g_str_equal, free, free_pool_record); g_hash_table_insert(pd->pools, strdup(lsm_pool_id_get(p)), p); for (i = 0; i < 3; ++i) { char name[32]; snprintf(name, sizeof(name), "POOL_%d", i); p = lsm_pool_record_alloc(name, name, LSM_POOL_ELEMENT_TYPE_FS | LSM_POOL_ELEMENT_TYPE_VOLUME, 0, UINT64_MAX, UINT64_MAX, LSM_POOL_STATUS_OK, "", sys_id, NULL); if (p) { g_hash_table_insert(pd->pools, strdup(lsm_pool_id_get(p)), p); } else { g_hash_table_destroy(pd->pools); pd->pools = NULL; break; } } } pd->volumes = g_hash_table_new_full(g_str_hash, g_str_equal, free, free_allocated_volume); pd->access_groups = g_hash_table_new_full(g_str_hash, g_str_equal, free, free_allocated_ag); /* We will delete the key, but the value will get cleaned up in its own container */ pd->group_grant = g_hash_table_new_full(g_str_hash, g_str_equal, free, free_group_grant_hash); pd->fs = g_hash_table_new_full(g_str_hash, g_str_equal, free, free_allocated_fs); pd->jobs = g_hash_table_new_full(g_str_hash, g_str_equal, free, free_allocated_job); pd->disks = g_hash_table_new_full(g_str_hash, g_str_equal, free, free_disk); for (i = 0; i < 10; ++i) { lsm_disk *d = 
NULL; char name[17]; char *key = NULL; snprintf(name, sizeof(name), "Sim C disk %d", i); d = lsm_disk_record_alloc(md5(name), name, LSM_DISK_TYPE_SOP, 512, 0x8000000000000, LSM_DISK_STATUS_OK, sys_id); key = strdup(lsm_disk_id_get(d)); if (!key || !d) { g_hash_table_destroy(pd->disks); pd->disks = NULL; lsm_disk_record_free(d); d = NULL; free(key); key = NULL; break; } g_hash_table_insert(pd->disks, key, d); d = NULL; } if (!pd->system[0] || !pd->volumes || !pd->pools || !pd->access_groups || !pd->group_grant || !pd->fs || !pd->jobs || !pd->disks) { rc = LSM_ERR_NO_MEMORY; /* We need to free everything */ _unload(pd); pd = NULL; } else { rc = lsm_register_plugin_v1_2(c, pd, &mgm_ops, &san_ops, &fs_ops, &nfs_ops, &ops_v1_2); } } return rc; } int unload(lsm_plugin_ptr c, lsm_flag flags) { struct plugin_data *pd = (struct plugin_data *) lsm_private_data_get(c); if (pd) { _unload(pd); return LSM_ERR_OK; } else { return LSM_ERR_INVALID_ARGUMENT; } } int main(int argc, char *argv[]) { return lsm_plugin_init_v1(argc, argv, load, unload, name, version); } #ifdef __cplusplus } #endif libstoragemgmt-1.2.3/plugin/nstor/0000775000175000017500000000000012542455463014215 500000000000000libstoragemgmt-1.2.3/plugin/nstor/nstor_lsmplugin0000775000175000017500000000240112537737032017316 00000000000000#!/usr/bin/env python2 # # Copyright (C) 2012 Nexenta Systems, Inc. # All rights reserved. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: legkodymov import sys import syslog import traceback try: from lsm.plugin.nstor.nstor import NexentaStor from lsm import PluginRunner if __name__ == '__main__': PluginRunner(NexentaStor, sys.argv).run() except Exception: #This should be quite rare, but when it does happen this is pretty #key in understanding what happened, especially when it happens when #running from the daemon. msg = str(traceback.format_exc()) syslog.syslog(syslog.LOG_ERR, msg) sys.stderr.write(msg) sys.exit(1) libstoragemgmt-1.2.3/plugin/nstor/__init__.py0000664000175000017500000000000012537546123016232 00000000000000libstoragemgmt-1.2.3/plugin/nstor/nstor.py0000664000175000017500000010244012537737032015654 00000000000000# # Copyright (C) 2013-2014 Nexenta Systems, Inc. # All rights reserved. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . 
# # Author: legkodymov # Gris Ge import urllib2 import sys import urlparse try: import simplejson as json except ImportError: import json import base64 import time import traceback import copy from lsm import (AccessGroup, Capabilities, ErrorNumber, FileSystem, INfs, IStorageAreaNetwork, LsmError, NfsExport, Pool, FsSnapshot, System, VERSION, Volume, md5, error, common_urllib2_error_handler, search_property) def handle_nstor_errors(method): def nstor_wrapper(*args, **kwargs): try: return method(*args, **kwargs) except LsmError as lsm: raise except Exception as e: error("Unexpected exception:\n" + traceback.format_exc()) raise LsmError(ErrorNumber.PLUGIN_BUG, str(e), traceback.format_exc()) return nstor_wrapper class NexentaStor(INfs, IStorageAreaNetwork): _V3_PORT = '2000' # Management port for v3.x device _V4_PORT = '8457' # Management port for v4.x device def plugin_info(self, flags=0): # TODO: Change this to something more appropriate return "NexentaStor support", VERSION def __init__(self): self.uparse = None self.password = None self.timeout = None self._system = None self._port = NexentaStor._V3_PORT self._scheme = 'http' def _ns_request(self, path, data): response = None parms = json.dumps(data) url = '%s://%s:%s/%s' % \ (self._scheme, self.uparse.hostname, self._port, path) request = urllib2.Request(url, parms) username = self.uparse.username or 'admin' base64string = base64.encodestring('%s:%s' % (username, self.password))[:-1] request.add_header('Authorization', 'Basic %s' % base64string) request.add_header('Content-Type', 'application/json') try: response = urllib2.urlopen(request, timeout=self.timeout / 1000) except Exception as e: try: common_urllib2_error_handler(e) except LsmError as lsm_e: exc_info = sys.exc_info() if lsm_e.code == ErrorNumber.NETWORK_CONNREFUSED: if not self.uparse.port and \ self._port == NexentaStor._V3_PORT: self._port = NexentaStor._V4_PORT return self._ns_request(path, data) raise exc_info[0], exc_info[1], exc_info[2] 
resp_json = response.read() resp = json.loads(resp_json) if resp['error']: if 'message' in resp['error']: msg = resp['error']['message'] # Check to see if there is a better way to do this... if 'dataset already exists' in msg: raise LsmError(ErrorNumber.NAME_CONFLICT, msg) if 'Unable to destroy hostgroup' in msg: raise LsmError(ErrorNumber.IS_MASKED, msg) raise LsmError(ErrorNumber.PLUGIN_BUG, resp['error']) return resp['result'] def _request(self, method, obj, params): return self._ns_request('rest/nms', {"method": method, "object": obj, "params": params}) @property def system(self): if self._system is None: license_info = self._request("get_license_info", "appliance", []) fqdn = self._request("get_fqdn", "appliance", []) self._system = System(license_info['machine_sig'], fqdn, System.STATUS_OK, '') return self._system def plugin_register(self, uri, password, timeout, flags=0): self.uparse = urlparse.urlparse(uri) self.password = password or 'nexenta' self.timeout = timeout if self.uparse.port: self._port = self.uparse.port if self.uparse.scheme.lower() == 'nstor+ssl': self._scheme = 'https' @staticmethod def _to_bytes(size): if size.lower().endswith('k'): return int(float(size[:-1]) * 1024) if size.lower().endswith('m'): return int(float(size[:-1]) * 1024 * 1024) if size.lower().endswith('g'): return int(float(size[:-1]) * 1024 * 1024 * 1024) if size.lower().endswith('t'): return int(float(size[:-1]) * 1024 * 1024 * 1024 * 1024) if size.lower().endswith('p'): return int(float(size[:-1]) * 1024 * 1024 * 1024 * 1024 * 1024) if size.lower().endswith('e'): return int( float(size[:-1]) * 1024 * 1024 * 1024 * 1024 * 1024 * 1024) return size @handle_nstor_errors def pools(self, search_key=None, search_value=None, flags=0): pools_list = self._request("get_all_names", "volume", [""]) pools = [] for pool in pools_list: if pool == 'syspool': continue pool_info = self._request("get_child_props", "volume", [str(pool), ""]) pools.append(Pool(pool_info['name'], 
pool_info['name'], Pool.ELEMENT_TYPE_VOLUME | Pool.ELEMENT_TYPE_VOLUME_THIN | Pool.ELEMENT_TYPE_FS, 0, NexentaStor._to_bytes(pool_info['size']), NexentaStor._to_bytes(pool_info['free']), Pool.STATUS_UNKNOWN, '', self.system.id)) return search_property(pools, search_key, search_value) @handle_nstor_errors def fs(self, search_key=None, search_value=None, flags=0): fs_list = self._request("get_all_names", "folder", [""]) fss = [] pools = {} for fs in fs_list: pool_name = NexentaStor._get_pool_id(fs) if pool_name == 'syspool': continue if pool_name not in pools: pool_info = self._request("get_child_props", "volume", [str(fs), ""]) pools[pool_name] = pool_info else: pool_info = pools[pool_name] fss.append( FileSystem(fs, fs, NexentaStor._to_bytes(pool_info['size']), self._to_bytes(pool_info['available']), pool_name, fs)) return search_property(fss, search_key, search_value) @handle_nstor_errors def fs_create(self, pool, name, size_bytes, flags=0): """ Consider you have 'data' pool and folder 'a' in it (data/a) If you want create 'data/a/b', command line should look like: --create-fs=a/b --pool=data --size=1G """ if name.startswith(pool.name + '/'): chunks = name.split('/')[1:] name = '/'.join(chunks) fs_name = self._request("create", "folder", [pool.name, name])[0] filesystem = FileSystem(fs_name, fs_name, pool.total_space, pool.free_space, pool.id, fs_name) return None, filesystem @handle_nstor_errors def fs_delete(self, fs, flags=0): result = self._request("destroy", "folder", [fs.name, "-r"]) return @handle_nstor_errors def fs_snapshots(self, fs, flags=0): snapshot_list = self._request("get_names", "snapshot", [fs.name]) snapshots = [] for snapshot in snapshot_list: snapshot_info = self._request("get_child_props", "snapshot", [snapshot, "creation_seconds"]) snapshots.append(FsSnapshot(snapshot, snapshot, snapshot_info['creation_seconds'])) return snapshots @handle_nstor_errors def fs_snapshot_create(self, fs, snapshot_name, flags=0): full_name = "%s@%s" % (fs.name, 
snapshot_name) self._request("create", "snapshot", [full_name, "0"]) snapshot_info = self._request("get_child_props", "snapshot", [full_name, "creation_seconds"]) return None, FsSnapshot(full_name, full_name, snapshot_info['creation_seconds']) @handle_nstor_errors def fs_snapshot_delete(self, fs, snapshot, flags=0): self._request("destroy", "snapshot", [snapshot.name, ""]) return @handle_nstor_errors def time_out_set(self, ms, flags=0): self.timeout = ms return @handle_nstor_errors def time_out_get(self, flags=0): return self.timeout @handle_nstor_errors def plugin_unregister(self, flags=0): return @handle_nstor_errors def job_status(self, job_id, flags=0): return @handle_nstor_errors def job_free(self, job_id, flags=0): return @handle_nstor_errors def capabilities(self, system, flags=0): c = Capabilities() #File system c.set(Capabilities.FS) c.set(Capabilities.FS_DELETE) #c.set(Capabilities.FS_RESIZE) c.set(Capabilities.FS_CREATE) c.set(Capabilities.FS_CLONE) # c.set(Capabilities.FILE_CLONE) c.set(Capabilities.FS_SNAPSHOTS) c.set(Capabilities.FS_SNAPSHOT_CREATE) c.set(Capabilities.FS_SNAPSHOT_DELETE) c.set(Capabilities.FS_SNAPSHOT_RESTORE) # c.set(Capabilities.FS_SNAPSHOT_RESTORE_SPECIFIC_FILES) c.set(Capabilities.FS_CHILD_DEPENDENCY) c.set(Capabilities.FS_CHILD_DEPENDENCY_RM) # c.set(Capabilities.FS_CHILD_DEPENDENCY_RM_SPECIFIC_FILES) # #NFS c.set(Capabilities.EXPORT_AUTH) c.set(Capabilities.EXPORTS) c.set(Capabilities.EXPORT_FS) c.set(Capabilities.EXPORT_REMOVE) c.set(Capabilities.EXPORT_CUSTOM_PATH) # # #Block operations c.set(Capabilities.VOLUMES) c.set(Capabilities.VOLUME_CREATE) c.set(Capabilities.VOLUME_RESIZE) # c.set(Capabilities.VOLUME_REPLICATE) # c.set(Capabilities.VOLUME_REPLICATE_CLONE) # c.set(Capabilities.VOLUME_REPLICATE_COPY) # c.set(Capabilities.VOLUME_REPLICATE_MIRROR_ASYNC) # c.set(Capabilities.VOLUME_REPLICATE_MIRROR_SYNC) # c.set(Capabilities.VOLUME_COPY_RANGE_BLOCK_SIZE) # c.set(Capabilities.VOLUME_COPY_RANGE) # 
c.set(Capabilities.VOLUME_COPY_RANGE_CLONE) # c.set(Capabilities.VOLUME_COPY_RANGE_COPY) c.set(Capabilities.VOLUME_DELETE) # c.set(Capabilities.VOLUME_ENABLE) # c.set(Capabilities.VOLUME_DISABLE) c.set(Capabilities.VOLUME_MASK) c.set(Capabilities.VOLUME_UNMASK) c.set(Capabilities.ACCESS_GROUPS) c.set(Capabilities.ACCESS_GROUP_CREATE_ISCSI_IQN) c.set(Capabilities.ACCESS_GROUP_DELETE) c.set(Capabilities.ACCESS_GROUP_INITIATOR_ADD_ISCSI_IQN) c.set(Capabilities.ACCESS_GROUP_INITIATOR_DELETE) c.set(Capabilities.VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP) c.set(Capabilities.ACCESS_GROUPS_GRANTED_TO_VOLUME) c.set(Capabilities.VOLUME_CHILD_DEPENDENCY) c.set(Capabilities.VOLUME_CHILD_DEPENDENCY_RM) c.set(Capabilities.VOLUME_ISCSI_CHAP_AUTHENTICATION) return c @handle_nstor_errors def systems(self, flags=0): return [self.system] @handle_nstor_errors def fs_resize(self, fs, new_size_bytes, flags=0): raise LsmError(ErrorNumber.NO_SUPPORT, "Not implemented") @staticmethod def _get_pool_id(fs_name): return fs_name.split('/')[0] @handle_nstor_errors def fs_clone(self, src_fs, dest_fs_name, snapshot=None, flags=0): folder = src_fs.name.split('/')[0] dest = folder + '/' + dest_fs_name if snapshot is None: # User did not supply a snapshot, so we will create one for them name = src_fs.name.split('/')[0] snapshot = self.fs_snapshot_create( src_fs, name + "_clone_ss_" + md5(time.ctime()))[1] self._request("clone", "folder", [snapshot.name, dest]) pool_id = NexentaStor._get_pool_id(dest) pool_info = self._request("get_child_props", "volume", [pool_id, ""]) fs = FileSystem(dest, dest, NexentaStor._to_bytes(pool_info['size']), NexentaStor._to_bytes(pool_info['available']), pool_id, self.system.id) return None, fs @handle_nstor_errors def fs_snapshot_restore(self, fs, snapshot, files, restore_files, all_files=False, flags=0): self._request("rollback", "snapshot", [snapshot.name, '-r']) return def _dependencies_list(self, fs_name, volume=False): obj = "folder" if volume: obj = 'volume' pool_id = 
NexentaStor._get_pool_id(fs_name) fs_list = self._request("get_all_names", "folder", ["^%s/" % pool_id]) dependency_list = [] for filesystem in fs_list: origin = self._request("get_child_prop", "folder", [filesystem, 'origin']) if origin.startswith("%s/" % fs_name) or \ origin.startswith("%s@" % fs_name): dependency_list.append(filesystem) return dependency_list @handle_nstor_errors def fs_child_dependency(self, fs, files, flags=0): # Function get list of all folders of requested pool, # then it checks if 'fs' is the origin of one of folders return len(self._dependencies_list(fs.name)) > 0 @handle_nstor_errors def fs_child_dependency_rm(self, fs, files, flags=0): dep_list = self._dependencies_list(fs.name) for dep in dep_list: clone_name = dep.split('@')[0] self._request("promote", "folder", [clone_name]) return None @handle_nstor_errors def export_auth(self, flags=0): """ Returns the types of authentication that are available for NFS """ result = self._request("get_share_confopts", "netstorsvc", ['svc:/network/nfs/server:default']) rc = [] methods = result['auth_type']['opts'].split(';') for m in methods: rc.append(m.split('=>')[0]) return rc @handle_nstor_errors def exports(self, search_key=None, search_value=None, flags=0): """ Get a list of all exported file systems on the controller. 
""" exp_list = self._request("get_shared_folders", "netstorsvc", ['svc:/network/nfs/server:default', '']) exports = [] for e in exp_list: opts = self._request("get_shareopts", "netstorsvc", ['svc:/network/nfs/server:default', e]) exports.append(NfsExport(md5(opts['name']), e, opts['name'], opts['auth_type'], opts['root'], opts['read_write'], opts['read_only'], 'N/A', 'N/A', opts['extra_options'])) return search_property(exports, search_key, search_value) @handle_nstor_errors def export_fs(self, fs_id, export_path, root_list, rw_list, ro_list, anon_uid, anon_gid, auth_type, options, flags=0): """ Exports a filesystem as specified in the export """ if export_path is None: raise LsmError(ErrorNumber.INVALID_ARGUMENT, "Export path is required") md5_id = md5(export_path) fs_dict = {'auth_type': 'sys', 'anonymous': 'false'} if ro_list: fs_dict['read_only'] = ','.join(ro_list) if rw_list: fs_dict['read_write'] = ','.join(rw_list) if anon_uid or anon_gid: fs_dict['anonymous'] = 'true' if root_list: fs_dict['root'] = ','.join(root_list) if auth_type: fs_dict['auth_type'] = str(auth_type) if '*' in rw_list: fs_dict['anonymous'] = 'true' if options: fs_dict['extra_options'] = str(options) result = self._request("share_folder", "netstorsvc", ['svc:/network/nfs/server:default', fs_id, fs_dict]) return NfsExport(md5_id, fs_id, export_path, auth_type, root_list, rw_list, ro_list, anon_uid, anon_gid, options) @handle_nstor_errors def export_remove(self, export, flags=0): """ Removes the specified export """ self._request("unshare_folder", "netstorsvc", ['svc:/network/nfs/server:default', export.fs_id, '0']) return ########### SAN @staticmethod def _calc_group(name): return 'lsm_' + md5(name)[0:8] @handle_nstor_errors def volumes(self, search_key=None, search_value=None, flags=0): """ Returns an array of volume objects """ vol_list = [] lu_list = self._request("get_names", "zvol", [""]) # lu_list = self._ns_request('rest/nms', # {"method": "get_lu_list", # "object": "scsidisk", # 
"params": ['']}) for lu in lu_list: try: lu_props = self._request("get_lu_props", "scsidisk", [lu]) except: lu_props = {'guid': '', 'state': 'N/A'} zvol_props = self._request("get_child_props", "zvol", [lu, ""]) block_size = NexentaStor._to_bytes(zvol_props['volblocksize']) size_bytes = int(zvol_props['size_bytes']) num_of_blocks = size_bytes / block_size admin_state = Volume.ADMIN_STATE_ENABLED vol_list.append( Volume(lu, lu, lu_props['guid'].lower(), block_size, num_of_blocks, admin_state, self.system.id, NexentaStor._get_pool_id(lu))) return search_property(vol_list, search_key, search_value) @handle_nstor_errors def volume_create(self, pool, volume_name, size_bytes, provisioning, flags=0): """ Creates a volume, given a pool, volume name, size and provisioning returns a tuple (job_id, new volume) Note: Tuple return values are mutually exclusive, when one is None the other must be valid. """ if volume_name.startswith(pool.name + '/'): chunks = volume_name.split('/')[1:] volume_name = '/'.join(chunks) sparse = provisioning in (Volume.PROVISION_DEFAULT, Volume.PROVISION_THIN, Volume.PROVISION_UNKNOWN) if sparse: sparse = '1' else: sparse = '0' name = '%s/%s' % (pool.name, volume_name) block_size = '' self._request("create", "zvol", [name, str(size_bytes), block_size, sparse]) self._request("set_child_prop", "zvol", [name, 'compression', 'on']) self._request("set_child_prop", "zvol", [name, 'logbias', 'throughput']) self._request("create_lu", "scsidisk", [name, []]) vols = self.volumes('id', name) return None, vols[0] @handle_nstor_errors def volume_delete(self, volume, flags=0): """ Deletes a volume. 
Returns None on success, else raises an LsmError """ ag = self.access_groups_granted_to_volume(volume) if len(ag): raise LsmError(ErrorNumber.IS_MASKED, "Volume is masked to access group") self._request("delete_lu", "scsidisk", [volume.id]) self._request("destroy", "zvol", [volume.id, '']) return @handle_nstor_errors def volume_resize(self, volume, new_size_bytes, flags=0): """ Re-sizes a volume. Returns a tuple (job_id, re-sized_volume) Note: Tuple return values are mutually exclusive, when one is None the other must be valid. """ self._request("set_child_prop", "zvol", [volume.name, 'volsize', str(new_size_bytes)]) self._request("realign_size", "scsidisk", [volume.name]) new_num_of_blocks = new_size_bytes / volume.block_size return None, Volume(volume.id, volume.name, volume.vpd83, volume.block_size, new_num_of_blocks, volume.admin_state, volume.system_id, volume.pool_id) @handle_nstor_errors def volume_replicate(self, pool, rep_type, volume_src, name, flags=0): """ Replicates a volume from the specified pool. In this library, to replicate means to create a new volume which is a copy of the source. Returns a tuple (job_id, replicated volume) Note: Tuple return values are mutually exclusive, when one is None the other must be valid. 
""" raise LsmError(ErrorNumber.NO_SUPPORT, "volume_replicate not implemented") # if rep_type == Volume.REPLICATE_CLONE: # return # elif rep_type == Volume.REPLICATE_COPY: # return # elif rep_type == Volume.REPLICATE_MIRROR_SYNC: # return # elif rep_type == Volume.REPLICATE_MIRROR_ASYNC: # # AutoSync job - code not yet ready # rec = {'type': 'minute', 'auto-mount': '', 'dircontent': '0', # 'direction': '0', 'keep_src': '1', 'keep_dst': '1', # 'auto-clone': '0', 'marker': '', 'method': 'sync', # 'proto': 'zfs', 'period': '1', 'exclude': '', # 'from-host': 'localhost', 'from-fs': str(volume_src.name), # 'to-host': 'localhost', 'to-fs': '/backup', # 'progress-marker': '', 'day': '0', # 'hour': '0', 'minute': '0', 'options': ' -P1024 -n4', # 'from-snapshot': '', 'force': '46', 'retry': '0', # 'retry-timestamp': '0', 'comment': '', 'flags': '4', # 'trace_level': '10', 'rate_limit': '0', # 'autosync_role': 'master:no', 'action': '', # 'reverse_capable': '0', 'last_replic_time': '', # 'time_started': 'N/A', # '_unique': 'type from-host from-fs to-host to-fs', # 'zip_level': '0', 'success_counter': '0', 'trunk': '', # 'estimations': '0', 'marker_name': "AutoSync", # 'latest-suffix': '', 'custom_name': ''} # ret = self._ns_request('rest/nms', {"method": "fmri_create", # "object": "autosvc", # "params": ['auto-sync', '', # str(volume_src.name), # False, rec]}) # return # elif rep_type == Volume.REPLICATE_UNKNOWN: # return # return @handle_nstor_errors def iscsi_chap_auth(self, init_id, in_user, in_password, out_user, out_password, flags=0): """ Register a user/password for the specified initiator for CHAP authentication. 
""" if in_user is None: in_user = "" if in_password is None: in_password = "" if out_user is not None or out_password is not None: raise LsmError(ErrorNumber.INVALID_ARGUMENT, "outbound chap authentication is not supported at " "this time") try: self._request("create_initiator", "iscsitarget", [init_id, {'initiatorchapuser': in_user, 'initiatorchapsecret': in_password}]) except: self._request("modify_initiator", "iscsitarget", [init_id, {'initiatorchapuser': in_user, 'initiatorchapsecret': in_password}]) self._request("modify_initiator", "iscsitarget", [init_id, {'initiatorchapuser': in_user, 'initiatorchapsecret': in_password}]) return def _get_views(self, volume_name): results = [] try: results = self._request("list_lun_mapping_entries", "scsidisk", [volume_name]) except Exception: pass return results def _volume_mask(self, group_name, volume_name): self._request("add_lun_mapping_entry", "scsidisk", [volume_name, {'host_group': group_name}]) return @handle_nstor_errors def volume_mask(self, access_group, volume, flags=0): """ Allows an access group to access a volume. """ # Pre-check for already masked. 
if list(v.id for v in self.volumes_accessible_by_access_group(access_group) if v.id == volume.id): raise LsmError( ErrorNumber.NO_STATE_CHANGE, "Volume is already masked to requested access group") self._volume_mask(access_group.name, volume.name) return @handle_nstor_errors def volume_unmask(self, access_group, volume, flags=0): """ Revokes access for an access group for a volume """ views = self._get_views(volume.name) view_number = -1 for view in views: if view['host_group'] == access_group.name: view_number = view['entry_number'] if view_number == -1: raise LsmError(ErrorNumber.NO_STATE_CHANGE, "There is no such mapping for volume %s" % volume.name) self._request("remove_lun_mapping_entry", "scsidisk", [volume.name, view_number]) return @handle_nstor_errors def access_groups(self, search_key=None, search_value=None, flags=0): """ Returns a list of access groups """ hg_list = self._request("list_hostgroups", "stmf", []) ag_list = [] for hg in hg_list: init_ids = self._request("list_hostgroup_members", "stmf", [hg]) ag_list.append( AccessGroup(hg, hg, init_ids, AccessGroup.INIT_TYPE_ISCSI_IQN, self.system.id)) return search_property(ag_list, search_key, search_value) @handle_nstor_errors def access_group_create(self, name, init_id, init_type, system, flags=0): """ Creates of access group """ if system.id != self.system.id: raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM, "System %s not found" % system.id) if init_type != AccessGroup.INIT_TYPE_ISCSI_IQN: raise LsmError(ErrorNumber.NO_SUPPORT, "Nstor only support iSCSI Access Group") # Check that init_id is not a part of another hostgroup for ag in self.access_groups(): if init_id in ag.init_ids: raise LsmError(ErrorNumber.EXISTS_INITIATOR, "%s is already part of %s access group" % (init_id, ag.name)) if name == ag.name: raise LsmError(ErrorNumber.NAME_CONFLICT, "Access group with name exists!") self._request("create_hostgroup", "stmf", [name]) self._add_initiator(name, init_id) return AccessGroup(name, name, 
[init_id], init_type, system.id) @handle_nstor_errors def access_group_delete(self, access_group, flags=0): """ Deletes an access group """ vols = self.volumes_accessible_by_access_group(access_group) if len(vols): raise LsmError(ErrorNumber.IS_MASKED, "Access Group has volume(s) masked") self._request("destroy_hostgroup", "stmf", [access_group.name]) return @handle_nstor_errors def _add_initiator(self, group_name, initiator_id, remove=False): command = "add_hostgroup_member" if remove: command = "remove_hostgroup_member" self._request(command, "stmf", [group_name, initiator_id]) return def _access_group_initiators(self, access_group): hg_list = self._request("list_hostgroups", "stmf", []) if access_group.name not in hg_list: raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP, "AccessGroup %s(%s) not found" % (access_group.name, access_group.id)) return self._request("list_hostgroup_members", "stmf", [access_group.name]) @handle_nstor_errors def access_group_initiator_add(self, access_group, init_id, init_type, flags=0): """ Adds an initiator to an access group """ if init_type != AccessGroup.INIT_TYPE_ISCSI_IQN: raise LsmError(ErrorNumber.NO_SUPPORT, "Nstor only support iSCSI Access Group") init_ids = self._access_group_initiators(access_group) if init_id in init_ids: # Already in requested group. return copy.deepcopy(access_group) self._add_initiator(access_group.name, init_id) init_ids = self._request("list_hostgroup_members", "stmf", [access_group.name]) return AccessGroup(access_group.name, access_group.name, init_ids, AccessGroup.INIT_TYPE_ISCSI_IQN, access_group.id) @handle_nstor_errors def access_group_initiator_delete(self, access_group, init_id, init_type, flags=0): """ Deletes an initiator from an access group """ init_ids = self._access_group_initiators(access_group) if init_id not in init_ids: # Already removed from requested group. 
return copy.deepcopy(access_group) self._add_initiator(access_group.name, init_id, True) init_ids = self._request("list_hostgroup_members", "stmf", [access_group.name]) return AccessGroup(access_group.name, access_group.name, init_ids, AccessGroup.INIT_TYPE_ISCSI_IQN, access_group.id) @handle_nstor_errors def volumes_accessible_by_access_group(self, access_group, flags=0): """ Returns the list of volumes that access group has access to. """ volumes = [] all_volumes_list = self.volumes(flags=flags) for vol in all_volumes_list: for view in self._get_views(vol.name): if view['host_group'] == access_group.name: volumes.append(vol) return volumes @handle_nstor_errors def access_groups_granted_to_volume(self, volume, flags=0): """ Returns the list of access groups that have access to the specified """ ag_list = self.access_groups(flags=flags) hg = [] for view in self._get_views(volume.name): for ag in ag_list: if ag.name == view['host_group']: hg.append(ag) return hg @handle_nstor_errors def volume_child_dependency(self, volume, flags=0): """ Returns True if this volume has other volumes which are dependant on it. Implies that this volume cannot be deleted or possibly modified because it would affect its children. """ return len(self._dependencies_list(volume.name, True)) > 0 @handle_nstor_errors def volume_child_dependency_rm(self, volume, flags=0): """ If this volume has child dependency, this method call will fully replicate the blocks removing the relationship between them. This should return None (success) if volume_child_dependency would return False. Note: This operation could take a very long time depending on the size of the volume and the number of child dependencies. Returns None if complete else job id, raises LsmError on errors. 
""" dep_list = self._dependencies_list(volume.name) for dep in dep_list: clone_name = dep.split('@')[0] self._request("promote", "volume", [clone_name]) return None libstoragemgmt-1.2.3/plugin/sim/0000775000175000017500000000000012542455463013640 500000000000000libstoragemgmt-1.2.3/plugin/sim/simulator.py0000664000175000017500000002761012537737032016156 00000000000000# Copyright (C) 2011-2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: tasleson # Gris Ge from lsm import (uri_parse, VERSION, Capabilities, INfs, IStorageAreaNetwork, search_property) from simarray import SimArray class SimPlugin(INfs, IStorageAreaNetwork): """ Simple class that implements enough to allow the framework to be exercised. """ def __init__(self): self.uri = None self.password = None self.sim_array = None def plugin_register(self, uri, password, timeout, flags=0): self.uri = uri self.password = password #The caller may want to start clean, so we allow the caller to specify #a file to store and retrieve individual state. 
qp = uri_parse(uri) if 'parameters' in qp and 'statefile' in qp['parameters'] \ and qp['parameters']['statefile'] is not None: self.sim_array = SimArray(qp['parameters']['statefile'], timeout) else: self.sim_array = SimArray(None, timeout) return None def plugin_unregister(self, flags=0): pass def job_status(self, job_id, flags=0): return self.sim_array.job_status(job_id, flags) def job_free(self, job_id, flags=0): return self.sim_array.job_free(job_id, flags) @staticmethod def _sim_data_2_lsm(sim_data): """ Fake converter. SimArray already do SimData to LSM data convert. We move data convert to SimArray to make this sample plugin looks clean. But in real world, data converting is often handled by plugin itself rather than array. """ return sim_data def time_out_set(self, ms, flags=0): self.sim_array.time_out_set(ms, flags) return None def time_out_get(self, flags=0): return self.sim_array.time_out_get(flags) def capabilities(self, system, flags=0): rc = Capabilities() rc.enable_all() rc.set(Capabilities.POOLS_QUICK_SEARCH, Capabilities.UNSUPPORTED) rc.set(Capabilities.VOLUMES_QUICK_SEARCH, Capabilities.UNSUPPORTED) rc.set(Capabilities.DISKS_QUICK_SEARCH, Capabilities.UNSUPPORTED) rc.set(Capabilities.FS_QUICK_SEARCH, Capabilities.UNSUPPORTED) rc.set(Capabilities.ACCESS_GROUPS_QUICK_SEARCH, Capabilities.UNSUPPORTED) rc.set(Capabilities.NFS_EXPORTS_QUICK_SEARCH, Capabilities.UNSUPPORTED) rc.set(Capabilities.TARGET_PORTS_QUICK_SEARCH, Capabilities.UNSUPPORTED) return rc def plugin_info(self, flags=0): return "Storage simulator", VERSION def systems(self, flags=0): sim_syss = self.sim_array.systems() return [SimPlugin._sim_data_2_lsm(s) for s in sim_syss] def pools(self, search_key=None, search_value=None, flags=0): sim_pools = self.sim_array.pools(flags) return search_property( [SimPlugin._sim_data_2_lsm(p) for p in sim_pools], search_key, search_value) def volumes(self, search_key=None, search_value=None, flags=0): sim_vols = self.sim_array.volumes() return 
search_property( [SimPlugin._sim_data_2_lsm(v) for v in sim_vols], search_key, search_value) def disks(self, search_key=None, search_value=None, flags=0): sim_disks = self.sim_array.disks() return search_property( [SimPlugin._sim_data_2_lsm(d) for d in sim_disks], search_key, search_value) def volume_create(self, pool, volume_name, size_bytes, provisioning, flags=0): sim_vol = self.sim_array.volume_create( pool.id, volume_name, size_bytes, provisioning, flags) return SimPlugin._sim_data_2_lsm(sim_vol) def volume_delete(self, volume, flags=0): return self.sim_array.volume_delete(volume.id, flags) def volume_resize(self, volume, new_size_bytes, flags=0): sim_vol = self.sim_array.volume_resize( volume.id, new_size_bytes, flags) return SimPlugin._sim_data_2_lsm(sim_vol) def volume_replicate(self, pool, rep_type, volume_src, name, flags=0): dst_pool_id = None if pool is not None: dst_pool_id = pool.id else: dst_pool_id = volume_src.pool_id return self.sim_array.volume_replicate( dst_pool_id, rep_type, volume_src.id, name, flags) def volume_replicate_range_block_size(self, system, flags=0): return self.sim_array.volume_replicate_range_block_size( system.id, flags) def volume_replicate_range(self, rep_type, volume_src, volume_dest, ranges, flags=0): return self.sim_array.volume_replicate_range( rep_type, volume_src.id, volume_dest.id, ranges, flags) def volume_enable(self, volume, flags=0): return self.sim_array.volume_enable(volume.id, flags) def volume_disable(self, volume, flags=0): return self.sim_array.volume_disable(volume.id, flags) def access_groups(self, search_key=None, search_value=None, flags=0): sim_ags = self.sim_array.ags() return search_property( [SimPlugin._sim_data_2_lsm(a) for a in sim_ags], search_key, search_value) def access_group_create(self, name, init_id, init_type, system, flags=0): sim_ag = self.sim_array.access_group_create( name, init_id, init_type, system.id, flags) return SimPlugin._sim_data_2_lsm(sim_ag) def access_group_delete(self, 
access_group, flags=0): return self.sim_array.access_group_delete(access_group.id, flags) def access_group_initiator_add(self, access_group, init_id, init_type, flags=0): sim_ag = self.sim_array.access_group_initiator_add( access_group.id, init_id, init_type, flags) return SimPlugin._sim_data_2_lsm(sim_ag) def access_group_initiator_delete(self, access_group, init_id, init_type, flags=0): sim_ag = self.sim_array.access_group_initiator_delete( access_group.id, init_id, init_type, flags) return SimPlugin._sim_data_2_lsm(sim_ag) def volume_mask(self, access_group, volume, flags=0): return self.sim_array.volume_mask( access_group.id, volume.id, flags) def volume_unmask(self, access_group, volume, flags=0): return self.sim_array.volume_unmask( access_group.id, volume.id, flags) def volumes_accessible_by_access_group(self, access_group, flags=0): sim_vols = self.sim_array.volumes_accessible_by_access_group( access_group.id, flags) return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols] def access_groups_granted_to_volume(self, volume, flags=0): sim_vols = self.sim_array.access_groups_granted_to_volume( volume.id, flags) return [SimPlugin._sim_data_2_lsm(v) for v in sim_vols] def iscsi_chap_auth(self, init_id, in_user, in_password, out_user, out_password, flags=0): return self.sim_array.iscsi_chap_auth( init_id, in_user, in_password, out_user, out_password, flags) def volume_child_dependency(self, volume, flags=0): return self.sim_array.volume_child_dependency(volume.id, flags) def volume_child_dependency_rm(self, volume, flags=0): return self.sim_array.volume_child_dependency_rm(volume.id, flags) def fs(self, search_key=None, search_value=None, flags=0): sim_fss = self.sim_array.fs() return search_property( [SimPlugin._sim_data_2_lsm(f) for f in sim_fss], search_key, search_value) def fs_create(self, pool, name, size_bytes, flags=0): sim_fs = self.sim_array.fs_create(pool.id, name, size_bytes) return SimPlugin._sim_data_2_lsm(sim_fs) def fs_delete(self, fs, flags=0): 
return self.sim_array.fs_delete(fs.id, flags) def fs_resize(self, fs, new_size_bytes, flags=0): sim_fs = self.sim_array.fs_resize( fs.id, new_size_bytes, flags) return SimPlugin._sim_data_2_lsm(sim_fs) def fs_clone(self, src_fs, dest_fs_name, snapshot=None, flags=0): if snapshot is None: return self.sim_array.fs_clone( src_fs.id, dest_fs_name, None, flags) return self.sim_array.fs_clone( src_fs.id, dest_fs_name, snapshot.id, flags) def fs_file_clone(self, fs, src_file_name, dest_file_name, snapshot=None, flags=0): if snapshot is None: return self.sim_array.fs_file_clone( fs.id, src_file_name, dest_file_name, None, flags) return self.sim_array.fs_file_clone( fs.id, src_file_name, dest_file_name, snapshot.id, flags) def fs_snapshots(self, fs, flags=0): sim_snaps = self.sim_array.fs_snapshots(fs.id, flags) return [SimPlugin._sim_data_2_lsm(s) for s in sim_snaps] def fs_snapshot_create(self, fs, snapshot_name, flags=0): return self.sim_array.fs_snapshot_create( fs.id, snapshot_name, flags) def fs_snapshot_delete(self, fs, snapshot, flags=0): return self.sim_array.fs_snapshot_delete( fs.id, snapshot.id, flags) def fs_snapshot_restore(self, fs, snapshot, files, restore_files, all_files=False, flags=0): return self.sim_array.fs_snapshot_restore( fs.id, snapshot.id, files, restore_files, all_files, flags) def fs_child_dependency(self, fs, files, flags=0): return self.sim_array.fs_child_dependency(fs.id, files, flags) def fs_child_dependency_rm(self, fs, files, flags=0): return self.sim_array.fs_child_dependency_rm(fs.id, files, flags) def export_auth(self, flags=0): # The API should change some day return ["simple"] def exports(self, search_key=None, search_value=None, flags=0): sim_exps = self.sim_array.exports(flags) return search_property( [SimPlugin._sim_data_2_lsm(e) for e in sim_exps], search_key, search_value) def export_fs(self, fs_id, export_path, root_list, rw_list, ro_list, anon_uid, anon_gid, auth_type, options, flags=0): sim_exp = self.sim_array.fs_export( 
fs_id, export_path, root_list, rw_list, ro_list, anon_uid, anon_gid, auth_type, options, flags=0) return SimPlugin._sim_data_2_lsm(sim_exp) def export_remove(self, export, flags=0): return self.sim_array.fs_unexport(export.id, flags) def target_ports(self, search_key=None, search_value=None, flags=0): sim_tgts = self.sim_array.target_ports() return search_property( [SimPlugin._sim_data_2_lsm(t) for t in sim_tgts], search_key, search_value) def volume_raid_info(self, volume, flags=0): return self.sim_array.volume_raid_info(volume) def pool_member_info(self, pool, flags=0): return self.sim_array.pool_member_info(pool) def volume_raid_create_cap_get(self, system, flags=0): return self.sim_array.volume_raid_create_cap_get(system) def volume_raid_create(self, name, raid_type, disks, strip_size, flags=0): return self.sim_array.volume_raid_create( name, raid_type, disks, strip_size) libstoragemgmt-1.2.3/plugin/sim/sim_lsmplugin0000775000175000017500000000234012537737032016366 00000000000000#!/usr/bin/env python2 # Copyright (C) 2011-2013 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . 
# # Author: tasleson import sys import syslog import traceback try: from lsm import PluginRunner from lsm.plugin.sim.simulator import SimPlugin if __name__ == '__main__': PluginRunner(SimPlugin, sys.argv).run() except Exception: #This should be quite rare, but when it does happen this is pretty #key in understanding what happened, especially when it happens when #running from the daemon. msg = str(traceback.format_exc()) syslog.syslog(syslog.LOG_ERR, msg) sys.stderr.write(msg) sys.exit(1) libstoragemgmt-1.2.3/plugin/sim/simarray.py0000664000175000017500000025156312537737032015774 00000000000000# Copyright (C) 2011-2015 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . 
# # Author: tasleson # Gris Ge import random import tempfile import os import time import sqlite3 from lsm import (size_human_2_size_bytes) from lsm import (System, Volume, Disk, Pool, FileSystem, AccessGroup, FsSnapshot, NfsExport, md5, LsmError, TargetPort, ErrorNumber, JobStatus) def _handle_errors(method): def wrapper(*args, **kargs): try: return method(*args, **kargs) except sqlite3.OperationalError as sql_error: if type(args[0]) is SimArray and hasattr(args[0], 'bs_obj'): args[0].bs_obj.trans_rollback() if str(sql_error) == 'database is locked': raise LsmError( ErrorNumber.TIMEOUT, "Timeout to require lock on state file") raise LsmError( ErrorNumber.PLUGIN_BUG, "Got unexpected error from sqlite3: %s" % str(sql_error)) except LsmError: if type(args[0]) is SimArray and hasattr(args[0], 'bs_obj'): args[0].bs_obj.trans_rollback() raise except Exception as base_error: if type(args[0]) is SimArray and hasattr(args[0], 'bs_obj'): args[0].bs_obj.trans_rollback() raise LsmError( ErrorNumber.PLUGIN_BUG, "Got unexpected error: %s" % str(base_error)) return wrapper def _random_vpd(): """ Generate a random VPD83 NAA_Type3 ID """ vpd = ['60'] for _ in range(0, 15): vpd.append(str('%02x' % (random.randint(0, 255)))) return "".join(vpd) class PoolRAID(object): _RAID_DISK_CHK = { Volume.RAID_TYPE_JBOD: lambda x: x > 0, Volume.RAID_TYPE_RAID0: lambda x: x > 0, Volume.RAID_TYPE_RAID1: lambda x: x == 2, Volume.RAID_TYPE_RAID3: lambda x: x >= 3, Volume.RAID_TYPE_RAID4: lambda x: x >= 3, Volume.RAID_TYPE_RAID5: lambda x: x >= 3, Volume.RAID_TYPE_RAID6: lambda x: x >= 4, Volume.RAID_TYPE_RAID10: lambda x: x >= 4 and x % 2 == 0, Volume.RAID_TYPE_RAID15: lambda x: x >= 6 and x % 2 == 0, Volume.RAID_TYPE_RAID16: lambda x: x >= 8 and x % 2 == 0, Volume.RAID_TYPE_RAID50: lambda x: x >= 6 and x % 2 == 0, Volume.RAID_TYPE_RAID60: lambda x: x >= 8 and x % 2 == 0, Volume.RAID_TYPE_RAID51: lambda x: x >= 6 and x % 2 == 0, Volume.RAID_TYPE_RAID61: lambda x: x >= 8 and x % 2 == 0, } 
_RAID_PARITY_DISK_COUNT_FUNC = { Volume.RAID_TYPE_JBOD: lambda x: x, Volume.RAID_TYPE_RAID0: lambda x: x, Volume.RAID_TYPE_RAID1: lambda x: 1, Volume.RAID_TYPE_RAID3: lambda x: x - 1, Volume.RAID_TYPE_RAID4: lambda x: x - 1, Volume.RAID_TYPE_RAID5: lambda x: x - 1, Volume.RAID_TYPE_RAID6: lambda x: x - 2, Volume.RAID_TYPE_RAID10: lambda x: x / 2, Volume.RAID_TYPE_RAID15: lambda x: x / 2 - 1, Volume.RAID_TYPE_RAID16: lambda x: x / 2 - 2, Volume.RAID_TYPE_RAID50: lambda x: x - 2, Volume.RAID_TYPE_RAID60: lambda x: x - 4, Volume.RAID_TYPE_RAID51: lambda x: x / 2 - 1, Volume.RAID_TYPE_RAID61: lambda x: x / 2 - 2, } @staticmethod def data_disk_count(raid_type, disk_count): """ Return a integer indicating how many disks should be used as real data(not mirrored or parity) disks. Treating RAID 5 and 6 using fixed parity disk. """ if raid_type not in PoolRAID._RAID_DISK_CHK.keys(): raise LsmError( ErrorNumber.PLUGIN_BUG, "data_disk_count(): Got unsupported raid type(%d)" % raid_type) if PoolRAID._RAID_DISK_CHK[raid_type](disk_count) is False: raise LsmError( ErrorNumber.PLUGIN_BUG, "data_disk_count(): Illegal disk count" "(%d) for raid type(%d)" % (disk_count, raid_type)) return PoolRAID._RAID_PARITY_DISK_COUNT_FUNC[raid_type](disk_count) class BackStore(object): VERSION = "3.4" VERSION_SIGNATURE = 'LSM_SIMULATOR_DATA_%s_%s' % (VERSION, md5(VERSION)) JOB_DEFAULT_DURATION = 1 JOB_DATA_TYPE_VOL = 1 JOB_DATA_TYPE_FS = 2 JOB_DATA_TYPE_FS_SNAP = 3 SYS_ID = "sim-01" SYS_NAME = "LSM simulated storage plug-in" BLK_SIZE = 512 DEFAULT_STRIP_SIZE = 128 * 1024 # 128 KiB _LIST_SPLITTER = '#' SYS_KEY_LIST = ['id', 'name', 'status', 'status_info', 'version'] POOL_KEY_LIST = [ 'id', 'name', 'status', 'status_info', 'element_type', 'unsupported_actions', 'raid_type', 'member_type', 'parent_pool_id', 'total_space', 'free_space', 'strip_size'] DISK_KEY_LIST = [ 'id', 'name', 'total_space', 'disk_type', 'status', 'owner_pool_id', 'role'] VOL_KEY_LIST = [ 'id', 'vpd83', 'name', 'total_space', 
'consumed_size', 'pool_id', 'admin_state', 'thinp', 'is_hw_raid_vol'] TGT_KEY_LIST = [ 'id', 'port_type', 'service_address', 'network_address', 'physical_address', 'physical_name'] AG_KEY_LIST = ['id', 'name', 'init_type', 'init_ids_str'] JOB_KEY_LIST = ['id', 'duration', 'timestamp', 'data_type', 'data_id'] FS_KEY_LIST = [ 'id', 'name', 'total_space', 'free_space', 'consumed_size', 'pool_id'] FS_SNAP_KEY_LIST = [ 'id', 'fs_id', 'name', 'timestamp'] EXP_KEY_LIST = [ 'id', 'fs_id', 'exp_path', 'auth_type', 'anon_uid', 'anon_gid', 'options', 'exp_root_hosts_str', 'exp_rw_hosts_str', 'exp_ro_hosts_str'] SUPPORTED_VCR_RAID_TYPES = [ Volume.RAID_TYPE_RAID0, Volume.RAID_TYPE_RAID1, Volume.RAID_TYPE_RAID5, Volume.RAID_TYPE_RAID6, Volume.RAID_TYPE_RAID10, Volume.RAID_TYPE_RAID50, Volume.RAID_TYPE_RAID60] SUPPORTED_VCR_STRIP_SIZES = [ 8 * 1024, 16 * 1024, 32 * 1024, 64 * 1024, 128 * 1024, 256 * 1024, 512 * 1024, 1024 * 1024] def __init__(self, statefile, timeout): if not os.path.exists(statefile): os.close(os.open(statefile, os.O_WRONLY | os.O_CREAT)) # Due to umask, os.open() created file migt not be 666 permission. os.chmod(statefile, 0o666) self.statefile = statefile self.lastrowid = None self.sql_conn = sqlite3.connect( statefile, timeout=int(timeout/1000), isolation_level="IMMEDIATE") # Create tables no matter exist or not. No lock required. 
sql_cmd = "PRAGMA foreign_keys = ON;\n" sql_cmd += ( """ CREATE TABLE systems ( id TEXT PRIMARY KEY, name TEXT NOT NULL, status INTEGER NOT NULL, status_info TEXT, version TEXT NOT NULL); """) # ^ version hold the signature of data sql_cmd += ( "CREATE TABLE tgts (" "id INTEGER PRIMARY KEY, " "port_type INTEGER NOT NULL, " "service_address TEXT NOT NULL, " "network_address TEXT NOT NULL, " "physical_address TEXT NOT NULL, " "physical_name TEXT NOT NULL);\n") sql_cmd += ( "CREATE TABLE pools (" "id INTEGER PRIMARY KEY, " "name TEXT UNIQUE NOT NULL, " "status INTEGER NOT NULL, " "status_info TEXT, " "element_type INTEGER NOT NULL, " "unsupported_actions INTEGER, " "raid_type INTEGER NOT NULL, " "parent_pool_id INTEGER, " # ^ Indicate this pool is allocated from # other pool "member_type INTEGER, " "strip_size INTEGER, " "total_space LONG);\n") # ^ total_space here is only for sub-pool (pool from pool) sql_cmd += ( "CREATE TABLE disks (" "id INTEGER PRIMARY KEY, " "total_space LONG NOT NULL, " "disk_type INTEGER NOT NULL, " "status INTEGER NOT NULL, " "disk_prefix TEXT NOT NULL, " "owner_pool_id INTEGER, " # ^ Indicate this disk is used to assemble a pool "role TEXT," "FOREIGN KEY(owner_pool_id) " "REFERENCES pools(id) ON DELETE CASCADE );\n") sql_cmd += ( "CREATE TABLE volumes (" "id INTEGER PRIMARY KEY, " "vpd83 TEXT NOT NULL, " "name TEXT UNIQUE NOT NULL, " "total_space LONG NOT NULL, " "consumed_size LONG NOT NULL, " # ^ Reserved for future thinp support. "admin_state INTEGER, " "thinp INTEGER NOT NULL, " "is_hw_raid_vol INTEGER, " # ^ Once its volume deleted, pool will be delete also. # For HW RAID simulation only. 
"pool_id INTEGER NOT NULL, " "FOREIGN KEY(pool_id) " "REFERENCES pools(id) ON DELETE CASCADE);\n") sql_cmd += ( "CREATE TABLE ags (" "id INTEGER PRIMARY KEY, " "name TEXT UNIQUE NOT NULL);\n") sql_cmd += ( "CREATE TABLE inits (" "id TEXT UNIQUE NOT NULL, " "init_type INTEGER NOT NULL, " "owner_ag_id INTEGER NOT NULL, " "FOREIGN KEY(owner_ag_id) " "REFERENCES ags(id) ON DELETE CASCADE);\n") sql_cmd += ( "CREATE TABLE vol_masks (" "vol_id INTEGER NOT NULL, " "ag_id INTEGER NOT NULL, " "FOREIGN KEY(vol_id) REFERENCES volumes(id) ON DELETE CASCADE, " "FOREIGN KEY(ag_id) REFERENCES ags(id) ON DELETE CASCADE);\n") sql_cmd += ( "CREATE TABLE vol_reps (" "rep_type INTEGER, " "src_vol_id INTEGER NOT NULL, " "dst_vol_id INTEGER NOT NULL, " "FOREIGN KEY(src_vol_id) " "REFERENCES volumes(id) ON DELETE CASCADE, " "FOREIGN KEY(dst_vol_id) " "REFERENCES volumes(id) ON DELETE CASCADE);\n") sql_cmd += ( "CREATE TABLE fss (" "id INTEGER PRIMARY KEY, " "name TEXT UNIQUE NOT NULL, " "total_space LONG NOT NULL, " "consumed_size LONG NOT NULL, " "free_space LONG, " "pool_id INTEGER NOT NULL, " "FOREIGN KEY(pool_id) " "REFERENCES pools(id) ON DELETE CASCADE);\n") sql_cmd += ( "CREATE TABLE fs_snaps (" "id INTEGER PRIMARY KEY, " "name TEXT UNIQUE NOT NULL, " "fs_id INTEGER NOT NULL, " "timestamp LONG NOT NULL, " "FOREIGN KEY(fs_id) " "REFERENCES fss(id) ON DELETE CASCADE);\n") sql_cmd += ( "CREATE TABLE fs_clones (" "src_fs_id INTEGER NOT NULL, " "dst_fs_id INTEGER NOT NULL, " "FOREIGN KEY(src_fs_id) " "REFERENCES fss(id) ON DELETE CASCADE, " "FOREIGN KEY(dst_fs_id) " "REFERENCES fss(id) ON DELETE CASCADE);\n") sql_cmd += ( "CREATE TABLE exps (" "id INTEGER PRIMARY KEY, " "fs_id INTEGER NOT NULL, " "exp_path TEXT UNIQUE NOT NULL, " "auth_type TEXT, " "anon_uid INTEGER, " "anon_gid INTEGER, " "options TEXT, " "FOREIGN KEY(fs_id) " "REFERENCES fss(id) ON DELETE CASCADE);\n") sql_cmd += ( "CREATE TABLE exp_root_hosts(" "host TEXT NOT NULL, " "exp_id INTEGER NOT NULL, " "FOREIGN KEY(exp_id) " 
"REFERENCES exps(id) ON DELETE CASCADE);\n") sql_cmd += ( "CREATE TABLE exp_rw_hosts(" "host TEXT NOT NULL, " "exp_id INTEGER NOT NULL, " "FOREIGN KEY(exp_id) " "REFERENCES exps(id) ON DELETE CASCADE);\n") sql_cmd += ( "CREATE TABLE exp_ro_hosts(" "host TEXT NOT NULL, " "exp_id INTEGER NOT NULL, " "FOREIGN KEY(exp_id) " "REFERENCES exps(id) ON DELETE CASCADE);\n") sql_cmd += ( "CREATE TABLE jobs (" "id INTEGER PRIMARY KEY, " "duration INTEGER NOT NULL, " "timestamp TEXT NOT NULL, " "data_type INTEGER, " "data_id TEXT);\n") # Create views sql_cmd += ( """ CREATE VIEW pools_view AS SELECT pool0.id, pool0.name, pool0.status, pool0.status_info, pool0.element_type, pool0.unsupported_actions, pool0.raid_type, pool0.member_type, pool0.parent_pool_id, pool0.strip_size, pool1.total_space total_space, pool1.total_space - pool2.vol_consumed_size - pool3.fs_consumed_size - pool4.sub_pool_consumed_size free_space FROM pools pool0 LEFT JOIN ( SELECT pool.id, ifnull(pool.total_space, ifnull(SUM(disk.total_space), 0)) total_space FROM pools pool LEFT JOIN disks disk ON pool.id = disk.owner_pool_id AND disk.role = 'DATA' GROUP BY pool.id ) pool1 ON pool0.id = pool1.id LEFT JOIN ( SELECT pool.id, ifnull(SUM(volume.consumed_size), 0) vol_consumed_size FROM pools pool LEFT JOIN volumes volume ON volume.pool_id = pool.id GROUP BY pool.id ) pool2 ON pool0.id = pool2.id LEFT JOIN ( SELECT pool.id, ifnull(SUM(fs.consumed_size), 0) fs_consumed_size FROM pools pool LEFT JOIN fss fs ON fs.pool_id = pool.id GROUP BY pool.id ) pool3 ON pool0.id = pool3.id LEFT JOIN ( SELECT pool.id, ifnull(SUM(sub_pool.total_space), 0) sub_pool_consumed_size FROM pools pool LEFT JOIN pools sub_pool ON sub_pool.parent_pool_id = pool.id GROUP BY pool.id ) pool4 ON pool0.id = pool4.id GROUP BY pool0.id; """) sql_cmd += ( """ CREATE VIEW disks_view AS SELECT id, disk_prefix || '_' || id name, total_space, disk_type, role, status, owner_pool_id FROM disks ; """) sql_cmd += ( """ CREATE VIEW volumes_by_ag_view AS 
SELECT vol.id, vol.vpd83, vol.name, vol.total_space, vol.consumed_size, vol.pool_id, vol.admin_state, vol.thinp, vol.is_hw_raid_vol, vol_mask.ag_id ag_id FROM volumes vol LEFT JOIN vol_masks vol_mask ON vol_mask.vol_id = vol.id ; """) sql_cmd += ( """ CREATE VIEW ags_view AS SELECT ag.id, ag.name, CASE WHEN count(DISTINCT init.init_type) = 1 THEN init.init_type WHEN count(DISTINCT init.init_type) = 2 THEN %s ELSE %s END init_type, group_concat(init.id, '%s') init_ids_str FROM ags ag LEFT JOIN inits init ON ag.id = init.owner_ag_id GROUP BY ag.id ORDER BY init.init_type ; """ % ( AccessGroup.INIT_TYPE_ISCSI_WWPN_MIXED, AccessGroup.INIT_TYPE_UNKNOWN, BackStore._LIST_SPLITTER)) sql_cmd += ( """ CREATE VIEW ags_by_vol_view AS SELECT ag_new.id, ag_new.name, ag_new.init_type, ag_new.init_ids_str, vol_mask.vol_id vol_id FROM ( SELECT ag.id, ag.name, CASE WHEN count(DISTINCT init.init_type) = 1 THEN init.init_type WHEN count(DISTINCT init.init_type) = 2 THEN %s ELSE %s END init_type, group_concat(init.id, '%s') init_ids_str FROM ags ag LEFT JOIN inits init ON ag.id = init.owner_ag_id GROUP BY ag.id ORDER BY init.init_type ) ag_new LEFT JOIN vol_masks vol_mask ON vol_mask.ag_id = ag_new.id ; """ % ( AccessGroup.INIT_TYPE_ISCSI_WWPN_MIXED, AccessGroup.INIT_TYPE_UNKNOWN, BackStore._LIST_SPLITTER)) sql_cmd += ( """ CREATE VIEW exps_view AS SELECT exp.id, exp.fs_id, exp.exp_path, exp.auth_type, exp.anon_uid, exp.anon_gid, exp.options, exp2.exp_root_hosts_str, exp3.exp_rw_hosts_str, exp4.exp_ro_hosts_str FROM exps exp LEFT JOIN ( SELECT exp_t2.id, group_concat( exp_root_host.host, '%s') exp_root_hosts_str FROM exps exp_t2 LEFT JOIN exp_root_hosts exp_root_host ON exp_t2.id = exp_root_host.exp_id GROUP BY exp_t2.id ) exp2 ON exp.id = exp2.id LEFT JOIN ( SELECT exp_t3.id, group_concat( exp_rw_host.host, '%s') exp_rw_hosts_str FROM exps exp_t3 LEFT JOIN exp_rw_hosts exp_rw_host ON exp_t3.id = exp_rw_host.exp_id GROUP BY exp_t3.id ) exp3 ON exp.id = exp3.id LEFT JOIN ( SELECT 
exp_t4.id, group_concat( exp_ro_host.host, '%s') exp_ro_hosts_str FROM exps exp_t4 LEFT JOIN exp_ro_hosts exp_ro_host ON exp_t4.id = exp_ro_host.exp_id GROUP BY exp_t4.id ) exp4 ON exp.id = exp4.id GROUP BY exp.id; ; """ % ( BackStore._LIST_SPLITTER, BackStore._LIST_SPLITTER, BackStore._LIST_SPLITTER)) sql_cur = self.sql_conn.cursor() try: sql_cur.executescript(sql_cmd) except sqlite3.OperationalError as sql_error: if 'already exists' in str(sql_error): pass else: raise sql_error except sqlite3.DatabaseError as sql_error: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Stored simulator state incompatible with " "simulator, please move or delete %s" % self.statefile) def _check_version(self): sim_syss = self.sim_syss() if len(sim_syss) == 0 or not sim_syss[0]: return False else: if 'version' in sim_syss[0] and \ sim_syss[0]['version'] == BackStore.VERSION_SIGNATURE: return True raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Stored simulator state incompatible with " "simulator, please move or delete %s" % self.statefile) def check_version_and_init(self): """ Raise error if version not match. If empty database found, initiate. """ # The complex lock workflow is all caused by python sqlite3 do # autocommit for "CREATE TABLE" command. 
self.trans_begin() if self._check_version(): self.trans_commit() return else: self._data_add( 'systems', { 'id': BackStore.SYS_ID, 'name': BackStore.SYS_NAME, 'status': System.STATUS_OK, 'status_info': "", 'version': BackStore.VERSION_SIGNATURE, }) size_bytes_2t = size_human_2_size_bytes('2TiB') size_bytes_512g = size_human_2_size_bytes('512GiB') # Add 2 SATA disks(2TiB) pool_1_disks = [] for _ in range(0, 2): self._data_add( 'disks', { 'disk_prefix': "2TiB SATA Disk", 'total_space': size_bytes_2t, 'disk_type': Disk.TYPE_SATA, 'status': Disk.STATUS_OK, }) pool_1_disks.append(self.lastrowid) test_pool_disks = [] # Add 6 SAS disks(2TiB) for _ in range(0, 6): self._data_add( 'disks', { 'disk_prefix': "2TiB SAS Disk", 'total_space': size_bytes_2t, 'disk_type': Disk.TYPE_SAS, 'status': Disk.STATUS_OK, }) if len(test_pool_disks) < 2: test_pool_disks.append(self.lastrowid) ssd_pool_disks = [] # Add 5 SSD disks(512GiB) for _ in range(0, 5): self._data_add( 'disks', { 'disk_prefix': "512GiB SSD Disk", 'total_space': size_bytes_512g, 'disk_type': Disk.TYPE_SSD, 'status': Disk.STATUS_OK, }) if len(ssd_pool_disks) < 2: ssd_pool_disks.append(self.lastrowid) # Add 7 SSD disks(2TiB) for _ in range(0, 7): self._data_add( 'disks', { 'disk_prefix': "2TiB SSD Disk", 'total_space': size_bytes_2t, 'disk_type': Disk.TYPE_SSD, 'status': Disk.STATUS_OK, }) pool_1_id = self.sim_pool_create_from_disk( name='Pool 1', raid_type=Volume.RAID_TYPE_RAID1, sim_disk_ids=pool_1_disks, element_type=Pool.ELEMENT_TYPE_POOL | Pool.ELEMENT_TYPE_FS | Pool.ELEMENT_TYPE_VOLUME | Pool.ELEMENT_TYPE_DELTA | Pool.ELEMENT_TYPE_SYS_RESERVED, unsupported_actions=Pool.UNSUPPORTED_VOLUME_GROW | Pool.UNSUPPORTED_VOLUME_SHRINK) self.sim_pool_create_sub_pool( name='Pool 2(sub pool of Pool 1)', parent_pool_id=pool_1_id, element_type=Pool.ELEMENT_TYPE_FS | Pool.ELEMENT_TYPE_VOLUME | Pool.ELEMENT_TYPE_DELTA, size=size_bytes_512g) self.sim_pool_create_from_disk( name='Pool 3', raid_type=Volume.RAID_TYPE_RAID1, 
sim_disk_ids=ssd_pool_disks, element_type=Pool.ELEMENT_TYPE_FS | Pool.ELEMENT_TYPE_VOLUME | Pool.ELEMENT_TYPE_DELTA) self.sim_pool_create_from_disk( name='lsm_test_aggr', element_type=Pool.ELEMENT_TYPE_FS | Pool.ELEMENT_TYPE_VOLUME | Pool.ELEMENT_TYPE_DELTA, raid_type=Volume.RAID_TYPE_RAID0, sim_disk_ids=test_pool_disks) self._data_add( 'tgts', { 'port_type': TargetPort.TYPE_FC, 'service_address': '50:0a:09:86:99:4b:8d:c5', 'network_address': '50:0a:09:86:99:4b:8d:c5', 'physical_address': '50:0a:09:86:99:4b:8d:c5', 'physical_name': 'FC_a_0b', }) self._data_add( 'tgts', { 'port_type': TargetPort.TYPE_FCOE, 'service_address': '50:0a:09:86:99:4b:8d:c6', 'network_address': '50:0a:09:86:99:4b:8d:c6', 'physical_address': '50:0a:09:86:99:4b:8d:c6', 'physical_name': 'FCoE_b_0c', }) self._data_add( 'tgts', { 'port_type': TargetPort.TYPE_ISCSI, 'service_address': 'iqn.1986-05.com.example:sim-tgt-03', 'network_address': 'sim-iscsi-tgt-3.example.com:3260', 'physical_address': 'a4:4e:31:47:f4:e0', 'physical_name': 'iSCSI_c_0d', }) self._data_add( 'tgts', { 'port_type': TargetPort.TYPE_ISCSI, 'service_address': 'iqn.1986-05.com.example:sim-tgt-03', 'network_address': '10.0.0.1:3260', 'physical_address': 'a4:4e:31:47:f4:e1', 'physical_name': 'iSCSI_c_0e', }) self._data_add( 'tgts', { 'port_type': TargetPort.TYPE_ISCSI, 'service_address': 'iqn.1986-05.com.example:sim-tgt-03', 'network_address': '[2001:470:1f09:efe:a64e:31ff::1]:3260', 'physical_address': 'a4:4e:31:47:f4:e1', 'physical_name': 'iSCSI_c_0e', }) self.trans_commit() return def _sql_exec(self, sql_cmd, key_list=None): """ Execute sql command and get all output. If key_list is not None, will convert returned sql data to a list of dictionaries. 
        """
        sql_cur = self.sql_conn.cursor()
        sql_cur.execute(sql_cmd)
        # Remember the rowid of the last INSERT so callers can pick up the
        # auto-generated primary key of the row they just added.
        self.lastrowid = sql_cur.lastrowid
        sql_output = sql_cur.fetchall()
        if key_list and sql_output:
            # Convert each row tuple into a {column_name: value} dict.
            return list(
                dict(zip(key_list, value_list))
                for value_list in sql_output
                if value_list)
        else:
            return sql_output

    def _get_table(self, table_name, key_list):
        # Fetch the requested columns of every row in 'table_name' as a
        # list of dictionaries keyed by column name.
        sql_cmd = "SELECT %s FROM %s" % (",".join(key_list), table_name)
        return self._sql_exec(sql_cmd, key_list)

    def trans_begin(self):
        # IMMEDIATE takes the write lock up front, so concurrent simulator
        # processes serialize on the shared state file.
        self.sql_conn.execute("BEGIN IMMEDIATE TRANSACTION;")

    def trans_commit(self):
        self.sql_conn.commit()

    def trans_rollback(self):
        self.sql_conn.rollback()

    def _data_add(self, table_name, data_dict):
        """
        Insert one row into 'table_name'; data_dict maps column name to
        value.  None values are normalized to empty strings.
        """
        keys = data_dict.keys()
        values = ['' if v is None else str(v) for v in data_dict.values()]
        # NOTE(review): values are embedded by string interpolation, not
        # parameter binding -- tolerable for the simulator's internal data.
        sql_cmd = "INSERT INTO %s (%s) VALUES (%s);" % \
            (table_name,
             "'%s'" % ("', '".join(keys)),
             "'%s'" % ("', '".join(values)))
        self._sql_exec(sql_cmd)

    def _data_find(self, table, condition, key_list, flag_unique=False):
        """
        SELECT key_list columns from 'table' matching 'condition'.
        With flag_unique=True return a single row dict (or None), raising
        PLUGIN_BUG if more than one row matches.
        """
        sql_cmd = "SELECT %s FROM %s WHERE %s" % (
            ",".join(key_list), table, condition)
        sim_datas = self._sql_exec(sql_cmd, key_list)
        if flag_unique:
            if len(sim_datas) == 0:
                return None
            elif len(sim_datas) == 1:
                return sim_datas[0]
            else:
                raise LsmError(
                    ErrorNumber.PLUGIN_BUG,
                    "_data_find(): Got non-unique data: %s" % locals())
        else:
            return sim_datas

    def _data_update(self, table, data_id, column_name, value):
        # Update a single column of the row with primary key 'data_id'.
        # None is normalized to empty string, matching _data_add().
        if value is None:
            value = ''
        sql_cmd = "UPDATE %s SET %s='%s' WHERE id='%s'" % \
            (table, column_name, value, data_id)
        self._sql_exec(sql_cmd)

    def _data_delete(self, table, condition):
        # Delete every row of 'table' matching 'condition'.
        sql_cmd = "DELETE FROM %s WHERE %s;" % (table, condition)
        self._sql_exec(sql_cmd)

    def sim_job_create(self, job_data_type=None, data_id=None):
        """
        Return a job id(Integer).  The simulated duration comes from the
        LSM_SIM_TIME environment variable when set, else
        JOB_DEFAULT_DURATION.
        """
        self._data_add(
            "jobs",
            {
                "duration": os.getenv(
                    "LSM_SIM_TIME", BackStore.JOB_DEFAULT_DURATION),
                "timestamp": time.time(),
                "data_type": job_data_type,
                "data_id": data_id,
            })
        return self.lastrowid

    def sim_job_delete(self, sim_job_id):
        # Unconditional delete; a missing job id is a silent no-op here.
        self._data_delete('jobs', 'id="%s"' % sim_job_id)
    def sim_job_status(self, sim_job_id):
        """
        Return (progress, data_type, data) tuple. progress is the
        integer of percent.
        """
        sim_job = self._data_find(
            'jobs', 'id=%s' % sim_job_id, BackStore.JOB_KEY_LIST,
            flag_unique=True)
        if sim_job is None:
            raise LsmError(
                ErrorNumber.NOT_FOUND_JOB,
                "Job not found")

        # Progress is simulated from wall-clock time elapsed since the job
        # row was created, scaled by the configured duration.
        progress = int(
            (time.time() - float(sim_job['timestamp'])) /
            sim_job['duration'] * 100)

        data = None
        data_type = None

        if progress >= 100:
            progress = 100
            # Once complete, also return the object the job created.
            if sim_job['data_type'] == BackStore.JOB_DATA_TYPE_VOL:
                data = self.sim_vol_of_id(sim_job['data_id'])
                data_type = sim_job['data_type']
            elif sim_job['data_type'] == BackStore.JOB_DATA_TYPE_FS:
                data = self.sim_fs_of_id(sim_job['data_id'])
                data_type = sim_job['data_type']
            elif sim_job['data_type'] == BackStore.JOB_DATA_TYPE_FS_SNAP:
                data = self.sim_fs_snap_of_id(sim_job['data_id'])
                data_type = sim_job['data_type']

        return (progress, data_type, data)

    def sim_syss(self):
        """
        Return a list of sim_sys dict.
        """
        return self._get_table('systems', BackStore.SYS_KEY_LIST)

    def sim_disk_ids_of_pool(self, sim_pool_id):
        # IDs of every member disk (DATA and PARITY) of the given pool.
        return list(
            d['id']
            for d in self._data_find(
                'disks', 'owner_pool_id="%s"' % sim_pool_id, ['id']))

    def sim_disks(self):
        """
        Return a list of sim_disk dict.
        """
        return self._get_table('disks_view', BackStore.DISK_KEY_LIST)

    def sim_pools(self):
        """
        Return a list of sim_pool dict.
        """
        return self._get_table('pools_view', BackStore.POOL_KEY_LIST)

    def sim_pool_of_id(self, sim_pool_id):
        # Single pool lookup; raises NOT_FOUND_POOL when missing.
        return self._sim_data_of_id(
            "pools_view", sim_pool_id, BackStore.POOL_KEY_LIST,
            ErrorNumber.NOT_FOUND_POOL, "Pool")

    def sim_pool_create_from_disk(self, name, sim_disk_ids, raid_type,
                                  element_type, unsupported_actions=0,
                                  strip_size=0):
        """
        Create a disk-backed pool and mark its member disks as DATA or
        PARITY according to the RAID type.  Return the new pool id.
        """
        if strip_size == 0:
            strip_size = BackStore.DEFAULT_STRIP_SIZE

        self._data_add(
            'pools',
            {
                'name': name,
                'status': Pool.STATUS_OK,
                'status_info': '',
                'element_type': element_type,
                'unsupported_actions': unsupported_actions,
                'raid_type': raid_type,
                'member_type': Pool.MEMBER_TYPE_DISK,
                'strip_size': strip_size,
            })

        # How many of the member disks carry data for this RAID type.
        data_disk_count = PoolRAID.data_disk_count(
            raid_type, len(sim_disk_ids))

        # update disk owner
        sim_pool_id = self.lastrowid
        for sim_disk_id in sim_disk_ids[:data_disk_count]:
            self._data_update(
                'disks', sim_disk_id, 'owner_pool_id', sim_pool_id)
            self._data_update(
                'disks', sim_disk_id, 'role', 'DATA')
        for sim_disk_id in sim_disk_ids[data_disk_count:]:
            self._data_update(
                'disks', sim_disk_id, 'owner_pool_id', sim_pool_id)
            self._data_update(
                'disks', sim_disk_id, 'role', 'PARITY')
        return sim_pool_id

    def sim_pool_create_sub_pool(self, name, parent_pool_id, size,
                                 element_type, unsupported_actions=0):
        # A sub-pool carves 'size' bytes out of an existing parent pool.
        self._data_add(
            'pools',
            {
                'name': name,
                'status': Pool.STATUS_OK,
                'status_info': '',
                'element_type': element_type,
                'unsupported_actions': unsupported_actions,
                'raid_type': Volume.RAID_TYPE_OTHER,
                'member_type': Pool.MEMBER_TYPE_POOL,
                'parent_pool_id': parent_pool_id,
                'total_space': size,
            })
        return self.lastrowid

    def sim_pool_disks_count(self, sim_pool_id):
        # Total member disks (DATA + PARITY) of the pool.
        return self._sql_exec(
            "SELECT COUNT(id) FROM disks WHERE owner_pool_id=%s;" %
            sim_pool_id)[0][0]

    def sim_pool_data_disks_count(self, sim_pool_id=None):
        # Member disks holding data (excludes the PARITY role).
        return self._sql_exec(
            "SELECT COUNT(id) FROM disks WHERE "
            "owner_pool_id=%s and role='DATA';" % sim_pool_id)[0][0]

    def sim_vols(self, sim_ag_id=None):
        """
        Return a list of sim_vol dict.
        """
        # With sim_ag_id, restrict to volumes masked to that access group.
        if sim_ag_id:
            return self._data_find(
                'volumes_by_ag_view', 'ag_id=%s' % sim_ag_id,
                BackStore.VOL_KEY_LIST)
        else:
            return self._get_table('volumes', BackStore.VOL_KEY_LIST)
""" if sim_ag_id: return self._data_find( 'volumes_by_ag_view', 'ag_id=%s' % sim_ag_id, BackStore.VOL_KEY_LIST) else: return self._get_table('volumes', BackStore.VOL_KEY_LIST) def _sim_data_of_id(self, table_name, data_id, key_list, lsm_error_no, data_name): sim_data = self._data_find( table_name, 'id=%s' % data_id, key_list, flag_unique=True) if sim_data is None: if lsm_error_no: raise LsmError( lsm_error_no, "%s not found" % data_name) else: return None return sim_data def sim_vol_of_id(self, sim_vol_id): """ Return sim_vol if found. Raise error if not found. """ return self._sim_data_of_id( "volumes", sim_vol_id, BackStore.VOL_KEY_LIST, ErrorNumber.NOT_FOUND_VOLUME, "Volume") def _check_pool_free_space(self, sim_pool_id, size_bytes): sim_pool = self.sim_pool_of_id(sim_pool_id) if (sim_pool['free_space'] < size_bytes): raise LsmError(ErrorNumber.NOT_ENOUGH_SPACE, "Insufficient space in pool") @staticmethod def _block_rounding(size_bytes): return (size_bytes + BackStore.BLK_SIZE - 1) / \ BackStore.BLK_SIZE * BackStore.BLK_SIZE def sim_vol_create(self, name, size_bytes, sim_pool_id, thinp, is_hw_raid_vol=0): size_bytes = BackStore._block_rounding(size_bytes) self._check_pool_free_space(sim_pool_id, size_bytes) sim_vol = dict() sim_vol['vpd83'] = _random_vpd() sim_vol['name'] = name sim_vol['thinp'] = thinp sim_vol['pool_id'] = sim_pool_id sim_vol['total_space'] = size_bytes sim_vol['consumed_size'] = size_bytes sim_vol['admin_state'] = Volume.ADMIN_STATE_ENABLED sim_vol['is_hw_raid_vol'] = is_hw_raid_vol try: self._data_add("volumes", sim_vol) except sqlite3.IntegrityError as sql_error: raise LsmError( ErrorNumber.NAME_CONFLICT, "Name '%s' is already in use by other volume" % name) return self.lastrowid def sim_vol_delete(self, sim_vol_id): """ This does not check whether volume exist or not. """ # Check existence. 
sim_vol = self.sim_vol_of_id(sim_vol_id) if self._sim_ag_ids_of_masked_vol(sim_vol_id): raise LsmError( ErrorNumber.IS_MASKED, "Volume is masked to access group") dst_sim_vol_ids = self.dst_sim_vol_ids_of_src(sim_vol_id) if len(dst_sim_vol_ids) >= 1: for dst_sim_vol_id in dst_sim_vol_ids: if dst_sim_vol_id != sim_vol_id: # Don't raise error on volume internal replication. raise LsmError( ErrorNumber.PLUGIN_BUG, "Requested volume is a replication source") if sim_vol['is_hw_raid_vol']: # Delete the parent pool instead if found a HW RAID volume. self._data_delete("pools", 'id="%s"' % sim_vol['pool_id']) else: self._data_delete("volumes", 'id="%s"' % sim_vol_id) def sim_vol_mask(self, sim_vol_id, sim_ag_id): self.sim_vol_of_id(sim_vol_id) self.sim_ag_of_id(sim_ag_id) exist_mask = self._data_find( 'vol_masks', 'ag_id="%s" AND vol_id="%s"' % (sim_ag_id, sim_vol_id), ['vol_id']) if exist_mask: raise LsmError( ErrorNumber.NO_STATE_CHANGE, "Volume is already masked to requested access group") self._data_add( "vol_masks", {'ag_id': sim_ag_id, 'vol_id': sim_vol_id}) return None def sim_vol_unmask(self, sim_vol_id, sim_ag_id): self.sim_vol_of_id(sim_vol_id) self.sim_ag_of_id(sim_ag_id) condition = 'ag_id="%s" AND vol_id="%s"' % (sim_ag_id, sim_vol_id) exist_mask = self._data_find('vol_masks', condition, ['vol_id']) if exist_mask: self._data_delete('vol_masks', condition) else: raise LsmError( ErrorNumber.NO_STATE_CHANGE, "Volume is not masked to requested access group") return None def _sim_vol_ids_of_masked_ag(self, sim_ag_id): return list( m['vol_id'] for m in self._data_find( 'vol_masks', 'ag_id="%s"' % sim_ag_id, ['vol_id'])) def _sim_ag_ids_of_masked_vol(self, sim_vol_id): return list( m['ag_id'] for m in self._data_find( 'vol_masks', 'vol_id="%s"' % sim_vol_id, ['ag_id'])) def sim_vol_resize(self, sim_vol_id, new_size_bytes): org_new_size_bytes = new_size_bytes new_size_bytes = BackStore._block_rounding(new_size_bytes) sim_vol = self.sim_vol_of_id(sim_vol_id) if 
sim_vol['total_space'] == new_size_bytes: if org_new_size_bytes != new_size_bytes: # Even volume size is identical to rounded size, # but it's not what user requested, hence we silently pass. return else: raise LsmError( ErrorNumber.NO_STATE_CHANGE, "Volume size is identical to requested") sim_pool = self.sim_pool_of_id(sim_vol['pool_id']) increment = new_size_bytes - sim_vol['total_space'] if increment > 0: if sim_pool['unsupported_actions'] & Pool.UNSUPPORTED_VOLUME_GROW: raise LsmError( ErrorNumber.NO_SUPPORT, "Requested pool does not allow volume size grow") if sim_pool['free_space'] < increment: raise LsmError( ErrorNumber.NOT_ENOUGH_SPACE, "Insufficient space in pool") elif sim_pool['unsupported_actions'] & Pool.UNSUPPORTED_VOLUME_SHRINK: raise LsmError( ErrorNumber.NO_SUPPORT, "Requested pool does not allow volume size grow") # TODO(Gris Ge): If a volume is in a replication relationship, resize # should be handled properly. self._data_update( 'volumes', sim_vol_id, "total_space", new_size_bytes) self._data_update( 'volumes', sim_vol_id, "consumed_size", new_size_bytes) def dst_sim_vol_ids_of_src(self, src_sim_vol_id): """ Return a list of dst_vol_id for provided source volume ID. """ self.sim_vol_of_id(src_sim_vol_id) return list( d['dst_vol_id'] for d in self._data_find( 'vol_reps', 'src_vol_id="%s"' % src_sim_vol_id, ['dst_vol_id'])) def sim_vol_replica(self, src_sim_vol_id, dst_sim_vol_id, rep_type, blk_ranges=None): self.sim_vol_of_id(src_sim_vol_id) self.sim_vol_of_id(dst_sim_vol_id) # TODO(Gris Ge): Use consumed_size < total_space to reflect the CLONE # type. cur_src_sim_vol_ids = list( r['src_vol_id'] for r in self._data_find( 'vol_reps', 'dst_vol_id="%s"' % dst_sim_vol_id, ['src_vol_id'])) if len(cur_src_sim_vol_ids) == 1 and \ cur_src_sim_vol_ids[0] == src_sim_vol_id: # src and dst match. Maybe user are overriding old setting. 
    def sim_vol_src_replica_break(self, src_sim_vol_id):
        # Drop every replication record where this volume is the source.
        if not self.dst_sim_vol_ids_of_src(src_sim_vol_id):
            raise LsmError(
                ErrorNumber.NO_STATE_CHANGE,
                "Provided volume is not a replication source")

        self._data_delete(
            'vol_reps', 'src_vol_id="%s"' % src_sim_vol_id)

    def sim_vol_state_change(self, sim_vol_id, new_admin_state):
        # Toggle the admin (enabled/disabled) state of a volume.
        sim_vol = self.sim_vol_of_id(sim_vol_id)
        if sim_vol['admin_state'] == new_admin_state:
            raise LsmError(
                ErrorNumber.NO_STATE_CHANGE,
                "Volume admin state is identical to requested")

        self._data_update(
            'volumes', sim_vol_id, "admin_state", new_admin_state)

    @staticmethod
    def _sim_ag_format(sim_ag):
        """
        Update 'init_type' and 'init_ids' of sim_ag
        """
        # The view stores initiator ids as one splitter-joined string;
        # expose them as a real list instead.
        sim_ag['init_ids'] = sim_ag['init_ids_str'].split(
            BackStore._LIST_SPLITTER)
        del sim_ag['init_ids_str']
        return sim_ag

    def sim_ags(self, sim_vol_id=None):
        # List all access groups, or only those the given volume is
        # masked to.
        if sim_vol_id:
            sim_ags = self._data_find(
                'ags_by_vol_view', 'vol_id=%s' % sim_vol_id,
                BackStore.AG_KEY_LIST)
        else:
            sim_ags = self._get_table('ags_view', BackStore.AG_KEY_LIST)

        return [BackStore._sim_ag_format(a) for a in sim_ags]

    def _sim_init_create(self, init_type, init_id, sim_ag_id):
        # Initiator ids are globally unique ('id' is the primary key), so
        # an IntegrityError means it already belongs to some group.
        try:
            self._data_add(
                "inits",
                {
                    'id': init_id,
                    'init_type': init_type,
                    'owner_ag_id': sim_ag_id
                })
        except sqlite3.IntegrityError as sql_error:
            raise LsmError(
                ErrorNumber.EXISTS_INITIATOR,
                "Initiator '%s' is already in use by other access group" %
                init_id)

    def iscsi_chap_auth_set(self, init_id, in_user, in_pass, out_user,
                            out_pass):
        # Currently, there is no API method to query status of iscsi CHAP.
        return None

    def sim_ag_create(self, name, init_type, init_id):
        # Create the group row first, then its initial initiator.
        try:
            self._data_add("ags", {'name': name})
            sim_ag_id = self.lastrowid
        except sqlite3.IntegrityError as sql_error:
            # UNIQUE constraint on the access group name.
            raise LsmError(
                ErrorNumber.NAME_CONFLICT,
                "Name '%s' is already in use by other access group" % name)

        self._sim_init_create(init_type, init_id, sim_ag_id)

        return sim_ag_id

    def sim_ag_delete(self, sim_ag_id):
        self.sim_ag_of_id(sim_ag_id)
        if self._sim_vol_ids_of_masked_ag(sim_ag_id):
            raise LsmError(
                ErrorNumber.IS_MASKED,
                "Access group has volume masked to")

        self._data_delete('ags', 'id="%s"' % sim_ag_id)

    def sim_ag_init_add(self, sim_ag_id, init_id, init_type):
        sim_ag = self.sim_ag_of_id(sim_ag_id)
        if init_id in sim_ag['init_ids']:
            raise LsmError(
                ErrorNumber.NO_STATE_CHANGE,
                "Initiator already in access group")

        if init_type != AccessGroup.INIT_TYPE_ISCSI_IQN and \
                init_type != AccessGroup.INIT_TYPE_WWPN:
            raise LsmError(
                ErrorNumber.NO_SUPPORT,
                "Only support iSCSI IQN and WWPN initiator type")

        self._sim_init_create(init_type, init_id, sim_ag_id)
        return None

    def sim_ag_init_delete(self, sim_ag_id, init_id):
        sim_ag = self.sim_ag_of_id(sim_ag_id)
        if init_id not in sim_ag['init_ids']:
            raise LsmError(
                ErrorNumber.NO_STATE_CHANGE,
                "Initiator is not in defined access group")
        if len(sim_ag['init_ids']) == 1:
            # A group must always keep at least one initiator.
            raise LsmError(
                ErrorNumber.LAST_INIT_IN_ACCESS_GROUP,
                "Refused to remove the last initiator from access group")

        self._data_delete('inits', 'id="%s"' % init_id)

    def sim_ag_of_id(self, sim_ag_id):
        sim_ag = self._sim_data_of_id(
            "ags_view", sim_ag_id, BackStore.AG_KEY_LIST,
            ErrorNumber.NOT_FOUND_ACCESS_GROUP, "Access Group")
        BackStore._sim_ag_format(sim_ag)
        return sim_ag

    def sim_fss(self):
        """
        Return a list of sim_fs dict.
        """
        return self._get_table('fss', BackStore.FS_KEY_LIST)
    def sim_fs_of_id(self, sim_fs_id, raise_error=True):
        # Look up a file system row; optionally return None instead of
        # raising when it does not exist.
        lsm_error_no = ErrorNumber.NOT_FOUND_FS
        if not raise_error:
            lsm_error_no = None

        return self._sim_data_of_id(
            "fss", sim_fs_id, BackStore.FS_KEY_LIST, lsm_error_no,
            "File System")

    def sim_fs_create(self, name, size_bytes, sim_pool_id):
        # Create a file system row after rounding the size to whole
        # blocks and checking pool capacity.
        size_bytes = BackStore._block_rounding(size_bytes)
        self._check_pool_free_space(sim_pool_id, size_bytes)
        try:
            self._data_add(
                "fss",
                {
                    'name': name,
                    'total_space': size_bytes,
                    'consumed_size': size_bytes,
                    'free_space': size_bytes,
                    'pool_id': sim_pool_id,
                })
        except sqlite3.IntegrityError as sql_error:
            # UNIQUE constraint on the fs name.
            raise LsmError(
                ErrorNumber.NAME_CONFLICT,
                "Name '%s' is already in use by other fs" % name)
        return self.lastrowid

    def sim_fs_delete(self, sim_fs_id):
        # Refuse deletion while the fs is a clone source, has snapshots,
        # or is exported via NFS.
        self.sim_fs_of_id(sim_fs_id)
        if self.clone_dst_sim_fs_ids_of_src(sim_fs_id):
            # TODO(Gris Ge): API does not have dedicate error for this
            #                scenario.
            raise LsmError(
                ErrorNumber.PLUGIN_BUG,
                "Requested file system is a clone source")

        if self.sim_fs_snaps(sim_fs_id):
            raise LsmError(
                ErrorNumber.PLUGIN_BUG,
                "Requested file system has snapshot attached")

        if self._data_find('exps', 'fs_id="%s"' % sim_fs_id, ['id']):
            # TODO(Gris Ge): API does not have dedicate error for this
            #                scenario
            raise LsmError(
                ErrorNumber.PLUGIN_BUG,
                "Requested file system is exported via NFS")

        self._data_delete("fss", 'id="%s"' % sim_fs_id)

    def sim_fs_resize(self, sim_fs_id, new_size_bytes):
        org_new_size_bytes = new_size_bytes
        new_size_bytes = BackStore._block_rounding(new_size_bytes)
        sim_fs = self.sim_fs_of_id(sim_fs_id)
        if sim_fs['total_space'] == new_size_bytes:
            if new_size_bytes != org_new_size_bytes:
                # Size only matches after block rounding, not what the
                # user literally asked for, so silently accept.
                return
            else:
                raise LsmError(
                    ErrorNumber.NO_STATE_CHANGE,
                    "File System size is identical to requested")

        # TODO(Gris Ge): If a fs is in a clone/snapshot relationship,
        #                resize should be handled properly.
        sim_pool = self.sim_pool_of_id(sim_fs['pool_id'])

        if new_size_bytes > sim_fs['total_space'] and \
                sim_pool['free_space'] < new_size_bytes - \
                sim_fs['total_space']:
            raise LsmError(
                ErrorNumber.NOT_ENOUGH_SPACE,
                "Insufficient space in pool")

        self._data_update(
            'fss', sim_fs_id, "total_space", new_size_bytes)
        self._data_update(
            'fss', sim_fs_id, "consumed_size", new_size_bytes)
        self._data_update(
            'fss', sim_fs_id, "free_space", new_size_bytes)

    def sim_fs_snaps(self, sim_fs_id):
        # All snapshots of one (existing) file system.
        self.sim_fs_of_id(sim_fs_id)
        return self._data_find(
            'fs_snaps', 'fs_id="%s"' % sim_fs_id,
            BackStore.FS_SNAP_KEY_LIST)

    def sim_fs_snap_of_id(self, sim_fs_snap_id, sim_fs_id=None):
        # With sim_fs_id, also verify the snapshot belongs to that fs.
        sim_fs_snap = self._sim_data_of_id(
            'fs_snaps', sim_fs_snap_id, BackStore.FS_SNAP_KEY_LIST,
            ErrorNumber.NOT_FOUND_FS_SS, 'File system snapshot')
        if sim_fs_id and sim_fs_snap['fs_id'] != sim_fs_id:
            raise LsmError(
                ErrorNumber.NOT_FOUND_FS_SS,
                "Defined file system snapshot ID is not belong to requested "
                "file system")
        return sim_fs_snap

    def sim_fs_snap_create(self, sim_fs_id, name):
        self.sim_fs_of_id(sim_fs_id)
        try:
            self._data_add(
                'fs_snaps',
                {
                    'name': name,
                    'fs_id': sim_fs_id,
                    'timestamp': int(time.time()),
                })
        except sqlite3.IntegrityError as sql_error:
            # UNIQUE constraint on the snapshot name.
            raise LsmError(
                ErrorNumber.NAME_CONFLICT,
                "The name is already used by other file system snapshot")
        return self.lastrowid

    def sim_fs_snap_restore(self, sim_fs_id, sim_fs_snap_id, files,
                            restore_files, flag_all_files):
        # Currently LSM cannot query status of this action.
        # we simply check existence
        self.sim_fs_of_id(sim_fs_id)
        if sim_fs_snap_id:
            self.sim_fs_snap_of_id(sim_fs_snap_id, sim_fs_id)
        return
    def sim_fs_snap_delete(self, sim_fs_snap_id, sim_fs_id):
        # Validate both ids before deleting the snapshot row.
        self.sim_fs_of_id(sim_fs_id)
        self.sim_fs_snap_of_id(sim_fs_snap_id, sim_fs_id)
        self._data_delete('fs_snaps', 'id="%s"' % sim_fs_snap_id)

    def sim_fs_snap_del_by_fs(self, sim_fs_id):
        # Remove every snapshot belonging to the given file system.
        sql_cmd = "DELETE FROM fs_snaps WHERE fs_id='%s';" % sim_fs_id
        self._sql_exec(sql_cmd)

    def sim_fs_clone(self, src_sim_fs_id, dst_sim_fs_id, sim_fs_snap_id):
        # Record a clone relationship between two existing file systems.
        self.sim_fs_of_id(src_sim_fs_id)
        self.sim_fs_of_id(dst_sim_fs_id)

        if sim_fs_snap_id:
            # No need to trace state of snap id here due to lack of
            # query method.
            # We just check snapshot existence
            self.sim_fs_snap_of_id(sim_fs_snap_id, src_sim_fs_id)

        self._data_add(
            'fs_clones',
            {
                'src_fs_id': src_sim_fs_id,
                'dst_fs_id': dst_sim_fs_id,
            })

    def sim_fs_file_clone(self, sim_fs_id, src_fs_name, dst_fs_name,
                          sim_fs_snap_id):
        # We don't have API to query file level clone.
        # Simply check existence
        self.sim_fs_of_id(sim_fs_id)
        if sim_fs_snap_id:
            self.sim_fs_snap_of_id(sim_fs_snap_id, sim_fs_id)
        return

    def clone_dst_sim_fs_ids_of_src(self, src_sim_fs_id):
        """
        Return a list of dst_fs_id for provided clone source fs ID.
        """
        self.sim_fs_of_id(src_sim_fs_id)
        return list(
            d['dst_fs_id']
            for d in self._data_find(
                'fs_clones', 'src_fs_id="%s"' % src_sim_fs_id,
                ['dst_fs_id']))

    def sim_fs_src_clone_break(self, src_sim_fs_id):
        # Unconditionally drop all clone records with this fs as source.
        self._data_delete('fs_clones', 'src_fs_id="%s"' % src_sim_fs_id)

    def _sim_exp_format(self, sim_exp):
        # Expand the splitter-joined host strings from exps_view into
        # real lists under root_hosts/rw_hosts/ro_hosts.
        for key_name in ['root_hosts', 'rw_hosts', 'ro_hosts']:
            table_name = "exp_%s_str" % key_name
            if sim_exp[table_name]:
                sim_exp[key_name] = sim_exp[table_name].split(
                    BackStore._LIST_SPLITTER)
            else:
                sim_exp[key_name] = []
            del sim_exp[table_name]
        return sim_exp

    def sim_exps(self):
        # All NFS exports, with host lists expanded.
        return list(
            self._sim_exp_format(e)
            for e in self._get_table('exps_view', BackStore.EXP_KEY_LIST))

    def sim_exp_of_id(self, sim_exp_id):
        return self._sim_exp_format(
            self._sim_data_of_id(
                'exps_view', sim_exp_id, BackStore.EXP_KEY_LIST,
                ErrorNumber.NOT_FOUND_NFS_EXPORT, 'NFS Export'))

    def sim_exp_create(self, sim_fs_id, exp_path, root_hosts, rw_hosts,
                       ro_hosts, anon_uid, anon_gid, auth_type, options):
        # Auto-generate an export path when the caller did not give one.
        if exp_path is None:
            exp_path = "/nfs_exp_%s" % _random_vpd()[:8]
        self.sim_fs_of_id(sim_fs_id)

        try:
            self._data_add(
                'exps',
                {
                    'fs_id': sim_fs_id,
                    'exp_path': exp_path,
                    'anon_uid': anon_uid,
                    'anon_gid': anon_gid,
                    'auth_type': auth_type,
                    'options': options,
                })
        except sqlite3.IntegrityError as sql_error:
            # TODO(Gris Ge): Should we create new error instead of
            #                NAME_CONFLICT?
            raise LsmError(
                ErrorNumber.NAME_CONFLICT,
                "Export path is already used by other NFS export")
        sim_exp_id = self.lastrowid

        # Host access lists live in three side tables, one row per host.
        for root_host in root_hosts:
            self._data_add(
                'exp_root_hosts',
                {
                    'host': root_host,
                    'exp_id': sim_exp_id,
                })
        for rw_host in rw_hosts:
            self._data_add(
                'exp_rw_hosts',
                {
                    'host': rw_host,
                    'exp_id': sim_exp_id,
                })
        for ro_host in ro_hosts:
            self._data_add(
                'exp_ro_hosts',
                {
                    'host': ro_host,
                    'exp_id': sim_exp_id,
                })

        return sim_exp_id

    def sim_exp_delete(self, sim_exp_id):
        # Validate, then delete; host rows are tied to the export row.
        self.sim_exp_of_id(sim_exp_id)
        self._data_delete('exps', 'id="%s"' % sim_exp_id)

    def sim_tgts(self):
        """
        Return a list of sim_tgt dict.
        """
        return self._get_table('tgts', BackStore.TGT_KEY_LIST)
class SimArray(object):
    """
    Public face of the simulator.  Translates between external LSM ids
    (strings like "VOL_ID_00001") and BackStore integer row ids, and
    wraps every operation in a BackStore transaction.
    """
    SIM_DATA_FILE = os.getenv(
        "LSM_SIM_DATA",
        tempfile.gettempdir() + '/lsm_sim_data')
    # Width of the zero-padded numeric tail of public ids.
    ID_FMT = 5

    @staticmethod
    def _sim_id_to_lsm_id(sim_id, prefix):
        return "%s_ID_%0*d" % (prefix, SimArray.ID_FMT, sim_id)

    @staticmethod
    def _lsm_id_to_sim_id(lsm_id, lsm_error):
        # The trailing ID_FMT characters carry the integer row id.
        try:
            return int(lsm_id[-SimArray.ID_FMT:])
        except ValueError:
            raise lsm_error

    @staticmethod
    def _sim_job_id_of(job_id):
        return SimArray._lsm_id_to_sim_id(
            job_id, LsmError(ErrorNumber.NOT_FOUND_JOB, "Job not found"))

    @staticmethod
    def _sim_pool_id_of(pool_id):
        return SimArray._lsm_id_to_sim_id(
            pool_id, LsmError(ErrorNumber.NOT_FOUND_POOL, "Pool not found"))

    @staticmethod
    def _sim_vol_id_of(vol_id):
        return SimArray._lsm_id_to_sim_id(
            vol_id,
            LsmError(
                ErrorNumber.NOT_FOUND_VOLUME,
                "Volume not found"))

    @staticmethod
    def _sim_fs_id_of(fs_id):
        return SimArray._lsm_id_to_sim_id(
            fs_id,
            LsmError(
                ErrorNumber.NOT_FOUND_FS,
                "File system not found"))

    @staticmethod
    def _sim_fs_snap_id_of(snap_id):
        return SimArray._lsm_id_to_sim_id(
            snap_id,
            LsmError(
                ErrorNumber.NOT_FOUND_FS_SS,
                "File system snapshot not found"))

    @staticmethod
    def _sim_exp_id_of(exp_id):
        return SimArray._lsm_id_to_sim_id(
            exp_id,
            LsmError(
                ErrorNumber.NOT_FOUND_NFS_EXPORT,
                "File system export not found"))

    @staticmethod
    def _sim_ag_id_of(ag_id):
        # BUG FIX: this used to raise NOT_FOUND_NFS_EXPORT with
        # "File system export not found" -- a copy-paste from
        # _sim_exp_id_of().  An access group lookup failure must report
        # NOT_FOUND_ACCESS_GROUP.
        return SimArray._lsm_id_to_sim_id(
            ag_id,
            LsmError(
                ErrorNumber.NOT_FOUND_ACCESS_GROUP,
                "Access group not found"))

    @_handle_errors
    def __init__(self, statefile, timeout):
        # Fall back to the shared default state file when none given.
        if statefile is None:
            statefile = SimArray.SIM_DATA_FILE

        self.bs_obj = BackStore(statefile, timeout)
        self.bs_obj.check_version_and_init()
        self.statefile = statefile
        self.timeout = timeout

    def _job_create(self, data_type=None, sim_data_id=None):
        # Create a BackStore job and return its public id.
        sim_job_id = self.bs_obj.sim_job_create(
            data_type, sim_data_id)
        return SimArray._sim_id_to_lsm_id(sim_job_id, 'JOB')

    @_handle_errors
    def job_status(self, job_id, flags=0):
        """
        Return (status, progress, data); data is the created object once
        the job reaches 100%.
        """
        sim_job_id = SimArray._sim_job_id_of(job_id)

        (progress, data_type, sim_data) = self.bs_obj.sim_job_status(
            sim_job_id)
        status = JobStatus.INPROGRESS
        if progress == 100:
            status = JobStatus.COMPLETE

        data = None
        if data_type == BackStore.JOB_DATA_TYPE_VOL:
            data = SimArray._sim_vol_2_lsm(sim_data)
        elif data_type == BackStore.JOB_DATA_TYPE_FS:
            data = SimArray._sim_fs_2_lsm(sim_data)
        elif data_type == BackStore.JOB_DATA_TYPE_FS_SNAP:
            data = SimArray._sim_fs_snap_2_lsm(sim_data)

        return (status, progress, data)

    @_handle_errors
    def job_free(self, job_id, flags=0):
        self.bs_obj.trans_begin()
        self.bs_obj.sim_job_delete(SimArray._sim_job_id_of(job_id))
        self.bs_obj.trans_commit()
        return None

    @_handle_errors
    def time_out_set(self, ms, flags=0):
        # Reopen the back store with the new timeout (milliseconds in,
        # seconds out).
        self.bs_obj = BackStore(self.statefile, int(ms / 1000))
        self.timeout = ms
        return None

    @_handle_errors
    def time_out_get(self, flags=0):
        return self.timeout

    @staticmethod
    def _sim_sys_2_lsm(sim_sys):
        # Convert a sim_sys dict into a public System object.
        return System(
            sim_sys['id'], sim_sys['name'], sim_sys['status'],
            sim_sys['status_info'])

    @_handle_errors
    def systems(self):
        return list(
            SimArray._sim_sys_2_lsm(sim_sys)
            for sim_sys in self.bs_obj.sim_syss())

    @staticmethod
    def _sim_vol_2_lsm(sim_vol):
        # Convert a sim_vol dict into a public Volume object.
        vol_id = SimArray._sim_id_to_lsm_id(sim_vol['id'], 'VOL')
        pool_id = SimArray._sim_id_to_lsm_id(sim_vol['pool_id'], 'POOL')
        return Volume(vol_id, sim_vol['name'], sim_vol['vpd83'],
                      BackStore.BLK_SIZE,
                      int(sim_vol['total_space'] / BackStore.BLK_SIZE),
                      sim_vol['admin_state'], BackStore.SYS_ID,
                      pool_id)

    @_handle_errors
    def volumes(self):
        return list(
            SimArray._sim_vol_2_lsm(v) for v in self.bs_obj.sim_vols())

    @staticmethod
    def _sim_pool_2_lsm(sim_pool):
        # Convert a sim_pool dict into a public Pool object.
        pool_id = SimArray._sim_id_to_lsm_id(sim_pool['id'], 'POOL')
        name = sim_pool['name']
        total_space = sim_pool['total_space']
        free_space = sim_pool['free_space']
        status = sim_pool['status']
        status_info = sim_pool['status_info']
        sys_id = BackStore.SYS_ID
        element_type = sim_pool['element_type']
        unsupported_actions = sim_pool['unsupported_actions']
        return Pool(
            pool_id, name, element_type, unsupported_actions,
            total_space, free_space, status, status_info, sys_id)

    @_handle_errors
    def pools(self, flags=0):
        # Read-only snapshot of all pools inside a short transaction.
        self.bs_obj.trans_begin()
        sim_pools = self.bs_obj.sim_pools()
        self.bs_obj.trans_rollback()
        return list(
            SimArray._sim_pool_2_lsm(sim_pool) for sim_pool in sim_pools)

    @staticmethod
    def _sim_disk_2_lsm(sim_disk):
        # Disks without an owning pool role are reported as FREE.
        disk_status = Disk.STATUS_OK
        if sim_disk['role'] is None:
            disk_status |= Disk.STATUS_FREE

        return Disk(
            SimArray._sim_id_to_lsm_id(sim_disk['id'], 'DISK'),
            sim_disk['name'], sim_disk['disk_type'], BackStore.BLK_SIZE,
            int(sim_disk['total_space'] / BackStore.BLK_SIZE), disk_status,
            BackStore.SYS_ID)

    @_handle_errors
    def disks(self):
        return list(
            SimArray._sim_disk_2_lsm(sim_disk)
            for sim_disk in self.bs_obj.sim_disks())

    @_handle_errors
    def volume_create(self, pool_id, vol_name, size_bytes, thinp, flags=0,
                      _internal_use=False, _is_hw_raid_vol=0):
        """
        The '_internal_use' parameter is only for SimArray internal use.
        This method will return the new sim_vol id instead of job_id when
        '_internal_use' marked as True.
        """
        # Internal callers already hold a transaction; don't nest one.
        if _internal_use is False:
            self.bs_obj.trans_begin()

        new_sim_vol_id = self.bs_obj.sim_vol_create(
            vol_name, size_bytes, SimArray._sim_pool_id_of(pool_id), thinp,
            is_hw_raid_vol=_is_hw_raid_vol)

        if _internal_use:
            return new_sim_vol_id

        job_id = self._job_create(
            BackStore.JOB_DATA_TYPE_VOL, new_sim_vol_id)
        self.bs_obj.trans_commit()
        return job_id, None
""" if _internal_use is False: self.bs_obj.trans_begin() new_sim_vol_id = self.bs_obj.sim_vol_create( vol_name, size_bytes, SimArray._sim_pool_id_of(pool_id), thinp, is_hw_raid_vol=_is_hw_raid_vol) if _internal_use: return new_sim_vol_id job_id = self._job_create( BackStore.JOB_DATA_TYPE_VOL, new_sim_vol_id) self.bs_obj.trans_commit() return job_id, None @_handle_errors def volume_delete(self, vol_id, flags=0): self.bs_obj.trans_begin() self.bs_obj.sim_vol_delete(SimArray._sim_vol_id_of(vol_id)) job_id = self._job_create() self.bs_obj.trans_commit() return job_id @_handle_errors def volume_resize(self, vol_id, new_size_bytes, flags=0): self.bs_obj.trans_begin() sim_vol_id = SimArray._sim_vol_id_of(vol_id) self.bs_obj.sim_vol_resize(sim_vol_id, new_size_bytes) job_id = self._job_create( BackStore.JOB_DATA_TYPE_VOL, sim_vol_id) self.bs_obj.trans_commit() return job_id, None @_handle_errors def volume_replicate(self, dst_pool_id, rep_type, src_vol_id, new_vol_name, flags=0): self.bs_obj.trans_begin() src_sim_vol_id = SimArray._sim_pool_id_of(src_vol_id) # Verify the existence of source volume src_sim_vol = self.bs_obj.sim_vol_of_id(src_sim_vol_id) dst_sim_vol_id = self.volume_create( dst_pool_id, new_vol_name, src_sim_vol['total_space'], src_sim_vol['thinp'], _internal_use=True) self.bs_obj.sim_vol_replica(src_sim_vol_id, dst_sim_vol_id, rep_type) job_id = self._job_create( BackStore.JOB_DATA_TYPE_VOL, dst_sim_vol_id) self.bs_obj.trans_commit() return job_id, None @_handle_errors def volume_replicate_range_block_size(self, sys_id, flags=0): if sys_id != BackStore.SYS_ID: raise LsmError( ErrorNumber.NOT_FOUND_SYSTEM, "System not found") return BackStore.BLK_SIZE @_handle_errors def volume_replicate_range(self, rep_type, src_vol_id, dst_vol_id, ranges, flags=0): self.bs_obj.trans_begin() # TODO(Gris Ge): check whether star_blk + count is out of volume # boundary # TODO(Gris Ge): Should check block overlap. 
self.bs_obj.sim_vol_replica( SimArray._sim_pool_id_of(src_vol_id), SimArray._sim_pool_id_of(dst_vol_id), rep_type, ranges) job_id = self._job_create() self.bs_obj.trans_commit() return job_id @_handle_errors def volume_enable(self, vol_id, flags=0): self.bs_obj.trans_begin() self.bs_obj.sim_vol_state_change( SimArray._sim_vol_id_of(vol_id), Volume.ADMIN_STATE_ENABLED) self.bs_obj.trans_commit() return None @_handle_errors def volume_disable(self, vol_id, flags=0): self.bs_obj.trans_begin() self.bs_obj.sim_vol_state_change( SimArray._sim_vol_id_of(vol_id), Volume.ADMIN_STATE_DISABLED) self.bs_obj.trans_commit() return None @_handle_errors def volume_child_dependency(self, vol_id, flags=0): # TODO(Gris Ge): API defination is blur: # 0. Should we break replication if provided volume is a # replication target? # Assuming answer is no. # 1. _client.py comments incorrect: # "Implies that this volume cannot be deleted or possibly # modified because it would affect its children" # The 'modify' here is incorrect. If data on source volume # changes, SYNC_MIRROR replication will change all target # volumes. # 2. Should 'mask' relationship included? # # Assuming only replication counts here. # 3. For volume internal block replication, should we return # True or False. # # Assuming False # 4. volume_child_dependency_rm() against volume internal # block replication, remove replication or raise error? 
# # Assuming remove replication src_sim_vol_id = SimArray._sim_vol_id_of(vol_id) dst_sim_vol_ids = self.bs_obj.dst_sim_vol_ids_of_src(src_sim_vol_id) for dst_sim_fs_id in dst_sim_vol_ids: if dst_sim_fs_id != src_sim_vol_id: return True return False @_handle_errors def volume_child_dependency_rm(self, vol_id, flags=0): self.bs_obj.trans_begin() self.bs_obj.sim_vol_src_replica_break( SimArray._sim_vol_id_of(vol_id)) job_id = self._job_create() self.bs_obj.trans_commit() return job_id @staticmethod def _sim_fs_2_lsm(sim_fs): fs_id = SimArray._sim_id_to_lsm_id(sim_fs['id'], 'FS') pool_id = SimArray._sim_id_to_lsm_id(sim_fs['id'], 'POOL') return FileSystem(fs_id, sim_fs['name'], sim_fs['total_space'], sim_fs['free_space'], pool_id, BackStore.SYS_ID) @_handle_errors def fs(self): return list(SimArray._sim_fs_2_lsm(f) for f in self.bs_obj.sim_fss()) @_handle_errors def fs_create(self, pool_id, fs_name, size_bytes, flags=0, _internal_use=False): if not _internal_use: self.bs_obj.trans_begin() new_sim_fs_id = self.bs_obj.sim_fs_create( fs_name, size_bytes, SimArray._sim_pool_id_of(pool_id)) if _internal_use: return new_sim_fs_id job_id = self._job_create( BackStore.JOB_DATA_TYPE_FS, new_sim_fs_id) self.bs_obj.trans_commit() return job_id, None @_handle_errors def fs_delete(self, fs_id, flags=0): self.bs_obj.trans_begin() self.bs_obj.sim_fs_delete(SimArray._sim_fs_id_of(fs_id)) job_id = self._job_create() self.bs_obj.trans_commit() return job_id @_handle_errors def fs_resize(self, fs_id, new_size_bytes, flags=0): sim_fs_id = SimArray._sim_fs_id_of(fs_id) self.bs_obj.trans_begin() self.bs_obj.sim_fs_resize(sim_fs_id, new_size_bytes) job_id = self._job_create(BackStore.JOB_DATA_TYPE_FS, sim_fs_id) self.bs_obj.trans_commit() return job_id, None @_handle_errors def fs_clone(self, src_fs_id, dst_fs_name, snap_id, flags=0): self.bs_obj.trans_begin() sim_fs_snap_id = None if snap_id: sim_fs_snap_id = SimArray._sim_fs_snap_id_of(snap_id) src_sim_fs_id = 
SimArray._sim_fs_id_of(src_fs_id) src_sim_fs = self.bs_obj.sim_fs_of_id(src_sim_fs_id) pool_id = SimArray._sim_id_to_lsm_id(src_sim_fs['pool_id'], 'POOL') dst_sim_fs_id = self.fs_create( pool_id, dst_fs_name, src_sim_fs['total_space'], _internal_use=True) self.bs_obj.sim_fs_clone(src_sim_fs_id, dst_sim_fs_id, sim_fs_snap_id) job_id = self._job_create( BackStore.JOB_DATA_TYPE_FS, dst_sim_fs_id) self.bs_obj.trans_commit() return job_id, None @_handle_errors def fs_file_clone(self, fs_id, src_fs_name, dst_fs_name, snap_id, flags=0): self.bs_obj.trans_begin() sim_fs_snap_id = None if snap_id: sim_fs_snap_id = SimArray._sim_fs_snap_id_of(snap_id) self.bs_obj.sim_fs_file_clone( SimArray._sim_fs_id_of(fs_id), src_fs_name, dst_fs_name, sim_fs_snap_id) job_id = self._job_create() self.bs_obj.trans_commit() return job_id @staticmethod def _sim_fs_snap_2_lsm(sim_fs_snap): snap_id = SimArray._sim_id_to_lsm_id(sim_fs_snap['id'], 'FS_SNAP') return FsSnapshot( snap_id, sim_fs_snap['name'], sim_fs_snap['timestamp']) @_handle_errors def fs_snapshots(self, fs_id, flags=0): return list( SimArray._sim_fs_snap_2_lsm(s) for s in self.bs_obj.sim_fs_snaps( SimArray._sim_fs_id_of(fs_id))) @_handle_errors def fs_snapshot_create(self, fs_id, snap_name, flags=0): self.bs_obj.trans_begin() sim_fs_snap_id = self.bs_obj.sim_fs_snap_create( SimArray._sim_fs_id_of(fs_id), snap_name) job_id = self._job_create( BackStore.JOB_DATA_TYPE_FS_SNAP, sim_fs_snap_id) self.bs_obj.trans_commit() return job_id, None @_handle_errors def fs_snapshot_delete(self, fs_id, snap_id, flags=0): self.bs_obj.trans_begin() self.bs_obj.sim_fs_snap_delete( SimArray._sim_fs_snap_id_of(snap_id), SimArray._sim_fs_id_of(fs_id)) job_id = self._job_create() self.bs_obj.trans_commit() return job_id @_handle_errors def fs_snapshot_restore(self, fs_id, snap_id, files, restore_files, flag_all_files, flags): self.bs_obj.trans_begin() sim_fs_snap_id = None if snap_id: sim_fs_snap_id = SimArray._sim_fs_snap_id_of(snap_id) 
self.bs_obj.sim_fs_snap_restore( SimArray._sim_fs_id_of(fs_id), sim_fs_snap_id, files, restore_files, flag_all_files) job_id = self._job_create() self.bs_obj.trans_commit() return job_id @_handle_errors def fs_child_dependency(self, fs_id, files, flags=0): sim_fs_id = SimArray._sim_fs_id_of(fs_id) self.bs_obj.trans_begin() if self.bs_obj.clone_dst_sim_fs_ids_of_src(sim_fs_id) == [] and \ self.bs_obj.sim_fs_snaps(sim_fs_id) == []: self.bs_obj.trans_rollback() return False self.bs_obj.trans_rollback() return True @_handle_errors def fs_child_dependency_rm(self, fs_id, files, flags=0): """ Assuming API defination is break all clone relationship and remove all snapshot of this source file system. """ self.bs_obj.trans_begin() if self.fs_child_dependency(fs_id, files) is False: raise LsmError( ErrorNumber.NO_STATE_CHANGE, "No snapshot or fs clone target found for this file system") src_sim_fs_id = SimArray._sim_fs_id_of(fs_id) self.bs_obj.sim_fs_src_clone_break(src_sim_fs_id) self.bs_obj.sim_fs_snap_del_by_fs(src_sim_fs_id) job_id = self._job_create() self.bs_obj.trans_begin() return job_id @staticmethod def _sim_exp_2_lsm(sim_exp): exp_id = SimArray._sim_id_to_lsm_id(sim_exp['id'], 'EXP') fs_id = SimArray._sim_id_to_lsm_id(sim_exp['fs_id'], 'FS') return NfsExport(exp_id, fs_id, sim_exp['exp_path'], sim_exp['auth_type'], sim_exp['root_hosts'], sim_exp['rw_hosts'], sim_exp['ro_hosts'], sim_exp['anon_uid'], sim_exp['anon_gid'], sim_exp['options']) @_handle_errors def exports(self, flags=0): return [SimArray._sim_exp_2_lsm(e) for e in self.bs_obj.sim_exps()] @_handle_errors def fs_export(self, fs_id, exp_path, root_hosts, rw_hosts, ro_hosts, anon_uid, anon_gid, auth_type, options, flags=0): self.bs_obj.trans_begin() sim_exp_id = self.bs_obj.sim_exp_create( SimArray._sim_fs_id_of(fs_id), exp_path, root_hosts, rw_hosts, ro_hosts, anon_uid, anon_gid, auth_type, options) sim_exp = self.bs_obj.sim_exp_of_id(sim_exp_id) self.bs_obj.trans_commit() return 
SimArray._sim_exp_2_lsm(sim_exp) @_handle_errors def fs_unexport(self, exp_id, flags=0): self.bs_obj.trans_begin() self.bs_obj.sim_exp_delete(SimArray._sim_exp_id_of(exp_id)) self.bs_obj.trans_commit() return None @staticmethod def _sim_ag_2_lsm(sim_ag): ag_id = SimArray._sim_id_to_lsm_id(sim_ag['id'], 'AG') return AccessGroup(ag_id, sim_ag['name'], sim_ag['init_ids'], sim_ag['init_type'], BackStore.SYS_ID) @_handle_errors def ags(self): return list(SimArray._sim_ag_2_lsm(a) for a in self.bs_obj.sim_ags()) @_handle_errors def access_group_create(self, name, init_id, init_type, sys_id, flags=0): if sys_id != BackStore.SYS_ID: raise LsmError( ErrorNumber.NOT_FOUND_SYSTEM, "System not found") self.bs_obj.trans_begin() new_sim_ag_id = self.bs_obj.sim_ag_create(name, init_type, init_id) new_sim_ag = self.bs_obj.sim_ag_of_id(new_sim_ag_id) self.bs_obj.trans_commit() return SimArray._sim_ag_2_lsm(new_sim_ag) @_handle_errors def access_group_delete(self, ag_id, flags=0): self.bs_obj.trans_begin() self.bs_obj.sim_ag_delete(SimArray._sim_ag_id_of(ag_id)) self.bs_obj.trans_commit() return None @_handle_errors def access_group_initiator_add(self, ag_id, init_id, init_type, flags=0): sim_ag_id = SimArray._sim_ag_id_of(ag_id) self.bs_obj.trans_begin() self.bs_obj.sim_ag_init_add(sim_ag_id, init_id, init_type) new_sim_ag = self.bs_obj.sim_ag_of_id(sim_ag_id) self.bs_obj.trans_commit() return SimArray._sim_ag_2_lsm(new_sim_ag) @_handle_errors def access_group_initiator_delete(self, ag_id, init_id, init_type, flags=0): sim_ag_id = SimArray._sim_ag_id_of(ag_id) self.bs_obj.trans_begin() self.bs_obj.sim_ag_init_delete(sim_ag_id, init_id) sim_ag = self.bs_obj.sim_ag_of_id(sim_ag_id) self.bs_obj.trans_commit() return SimArray._sim_ag_2_lsm(sim_ag) @_handle_errors def volume_mask(self, ag_id, vol_id, flags=0): self.bs_obj.trans_begin() self.bs_obj.sim_vol_mask( SimArray._sim_vol_id_of(vol_id), SimArray._sim_ag_id_of(ag_id)) self.bs_obj.trans_commit() return None @_handle_errors def 
volume_unmask(self, ag_id, vol_id, flags=0): self.bs_obj.trans_begin() self.bs_obj.sim_vol_unmask( SimArray._sim_vol_id_of(vol_id), SimArray._sim_ag_id_of(ag_id)) self.bs_obj.trans_commit() return None @_handle_errors def volumes_accessible_by_access_group(self, ag_id, flags=0): self.bs_obj.trans_begin() sim_vols = self.bs_obj.sim_vols( sim_ag_id=SimArray._sim_ag_id_of(ag_id)) self.bs_obj.trans_rollback() return [SimArray._sim_vol_2_lsm(v) for v in sim_vols] @_handle_errors def access_groups_granted_to_volume(self, vol_id, flags=0): self.bs_obj.trans_begin() sim_ags = self.bs_obj.sim_ags( sim_vol_id=SimArray._sim_vol_id_of(vol_id)) self.bs_obj.trans_rollback() return [SimArray._sim_ag_2_lsm(a) for a in sim_ags] @_handle_errors def iscsi_chap_auth(self, init_id, in_user, in_pass, out_user, out_pass, flags=0): self.bs_obj.trans_begin() self.bs_obj.iscsi_chap_auth_set( init_id, in_user, in_pass, out_user, out_pass) self.bs_obj.trans_commit() return None @staticmethod def _sim_tgt_2_lsm(sim_tgt): tgt_id = "TGT_PORT_ID_%0*d" % (SimArray.ID_FMT, sim_tgt['id']) return TargetPort( tgt_id, sim_tgt['port_type'], sim_tgt['service_address'], sim_tgt['network_address'], sim_tgt['physical_address'], sim_tgt['physical_name'], BackStore.SYS_ID) @_handle_errors def target_ports(self): return list(SimArray._sim_tgt_2_lsm(t) for t in self.bs_obj.sim_tgts()) @_handle_errors def volume_raid_info(self, lsm_vol): sim_pool = self.bs_obj.sim_pool_of_id( SimArray._lsm_id_to_sim_id( lsm_vol.pool_id, LsmError(ErrorNumber.NOT_FOUND_POOL, "Pool not found"))) raid_type = sim_pool['raid_type'] strip_size = Volume.STRIP_SIZE_UNKNOWN min_io_size = BackStore.BLK_SIZE opt_io_size = Volume.OPT_IO_SIZE_UNKNOWN disk_count = Volume.DISK_COUNT_UNKNOWN if sim_pool['member_type'] == Pool.MEMBER_TYPE_POOL: parent_sim_pool = self.bs_obj.sim_pool_of_id( sim_pool['parent_pool_id']) raid_type = parent_sim_pool['raid_type'] disk_count = self.bs_obj.sim_pool_disks_count( parent_sim_pool['id']) data_disk_count = 
self.bs_obj.sim_pool_data_disks_count( parent_sim_pool['id']) else: disk_count = self.bs_obj.sim_pool_disks_count( sim_pool['id']) data_disk_count = self.bs_obj.sim_pool_data_disks_count( sim_pool['id']) if raid_type == Volume.RAID_TYPE_UNKNOWN or \ raid_type == Volume.RAID_TYPE_OTHER: return [ raid_type, strip_size, disk_count, min_io_size, opt_io_size] if raid_type == Volume.RAID_TYPE_MIXED: raise LsmError( ErrorNumber.PLUGIN_BUG, "volume_raid_info(): Got unsupported RAID_TYPE_MIXED pool " "%s" % sim_pool['id']) if raid_type == Volume.RAID_TYPE_RAID1 or \ raid_type == Volume.RAID_TYPE_JBOD: strip_size = BackStore.BLK_SIZE min_io_size = BackStore.BLK_SIZE opt_io_size = BackStore.BLK_SIZE else: strip_size = sim_pool['strip_size'] min_io_size = strip_size opt_io_size = int(data_disk_count * strip_size) return [raid_type, strip_size, disk_count, min_io_size, opt_io_size] @_handle_errors def pool_member_info(self, lsm_pool): sim_pool = self.bs_obj.sim_pool_of_id( SimArray._lsm_id_to_sim_id( lsm_pool.id, LsmError(ErrorNumber.NOT_FOUND_POOL, "Pool not found"))) member_type = sim_pool['member_type'] member_ids = [] if member_type == Pool.MEMBER_TYPE_POOL: member_ids = [ SimArray._sim_id_to_lsm_id( sim_pool['parent_pool_id'], 'POOL')] elif member_type == Pool.MEMBER_TYPE_DISK: member_ids = list( SimArray._sim_id_to_lsm_id(sim_disk_id, 'DISK') for sim_disk_id in self.bs_obj.sim_disk_ids_of_pool( sim_pool['id'])) else: member_type = Pool.MEMBER_TYPE_UNKNOWN return sim_pool['raid_type'], member_type, member_ids @_handle_errors def volume_raid_create_cap_get(self, system): if system.id != BackStore.SYS_ID: raise LsmError( ErrorNumber.NOT_FOUND_SYSTEM, "System not found") return ( BackStore.SUPPORTED_VCR_RAID_TYPES, BackStore.SUPPORTED_VCR_STRIP_SIZES) @_handle_errors def volume_raid_create(self, name, raid_type, disks, strip_size): if raid_type not in BackStore.SUPPORTED_VCR_RAID_TYPES: raise LsmError( ErrorNumber.NO_SUPPORT, "Provided 'raid_type' is not supported") if 
strip_size == Volume.VCR_STRIP_SIZE_DEFAULT: strip_size = BackStore.DEFAULT_STRIP_SIZE elif strip_size not in BackStore.SUPPORTED_VCR_STRIP_SIZES: raise LsmError( ErrorNumber.NO_SUPPORT, "Provided 'strip_size' is not supported") self.bs_obj.trans_begin() pool_name = "Pool for volume %s" % name sim_disk_ids = [ SimArray._lsm_id_to_sim_id( d.id, LsmError(ErrorNumber.NOT_FOUND_DISK, "Disk not found")) for d in disks] for disk in disks: if not disk.status & Disk.STATUS_FREE: raise LsmError( ErrorNumber.DISK_NOT_FREE, "Disk %s is not in DISK.STATUS_FREE mode" % disk.id) try: sim_pool_id = self.bs_obj.sim_pool_create_from_disk( name=pool_name, raid_type=raid_type, sim_disk_ids=sim_disk_ids, element_type=Pool.ELEMENT_TYPE_VOLUME, unsupported_actions=Pool.UNSUPPORTED_VOLUME_GROW | Pool.UNSUPPORTED_VOLUME_SHRINK, strip_size=strip_size) except sqlite3.IntegrityError as sql_error: raise LsmError( ErrorNumber.NAME_CONFLICT, "Name '%s' is already in use by other volume" % name) sim_pool = self.bs_obj.sim_pool_of_id(sim_pool_id) sim_vol_id = self.volume_create( SimArray._sim_id_to_lsm_id(sim_pool_id, 'POOL'), name, sim_pool['free_space'], Volume.PROVISION_FULL, _internal_use=True, _is_hw_raid_vol=1) sim_vol = self.bs_obj.sim_vol_of_id(sim_vol_id) self.bs_obj.trans_commit() return SimArray._sim_vol_2_lsm(sim_vol) libstoragemgmt-1.2.3/plugin/sim/__init__.py0000664000175000017500000000000012537546123015655 00000000000000libstoragemgmt-1.2.3/plugin/ontap/0000775000175000017500000000000012542455463014171 500000000000000libstoragemgmt-1.2.3/plugin/ontap/na.py0000664000175000017500000006746712537737032015104 00000000000000# Copyright (C) 2012-2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. 
# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: tasleson import urllib2 import socket import sys from xml.etree import ElementTree import time from binascii import hexlify from _ssl import SSLError from M2Crypto import RC4 from lsm.external.xmltodict import convert_xml_to_dict from lsm import (ErrorNumber) #Set to an appropriate directory and file to dump the raw response. xml_debug = None def netapp_filer_parse_response(resp): if xml_debug: out = open(xml_debug, "wb") out.write(resp) out.close() return convert_xml_to_dict(ElementTree.fromstring(resp)) def param_value(val): """ Given a parameter to pass to filer, convert to XML """ rc = "" if type(val) is dict or isinstance(val, dict): for k, v in val.items(): rc += "<%s>%s" % (k, param_value(v), k) elif type(val) is list or isinstance(val, list): for i in val: rc += param_value(i) else: rc = val return rc def netapp_filer(host, username, password, timeout, command, parameters=None, ssl=False): """ Issue a command to the NetApp filer. Note: Change to default ssl on before we ship a release version. 
""" proto = 'http' if ssl: proto = 'https' url = "%s://%s/servlets/netapp.servlets.admin.XMLrequest_filer" % \ (proto, host) req = urllib2.Request(url) req.add_header('Content-Type', 'text/xml') password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() password_manager.add_password(None, url, username, password) auth_manager = urllib2.HTTPBasicAuthHandler(password_manager) opener = urllib2.build_opener(auth_manager) urllib2.install_opener(opener) #build the command and the arguments for it p = "" if parameters: for k, v in parameters.items(): p += "<%s>%s" % (k, param_value(v), k) payload = "<%s>\n%s\n" % (command, p, command) data = """ %s """ % payload handler = None rc = None try: handler = urllib2.urlopen(req, data, float(timeout)) if handler.getcode() == 200: rc = netapp_filer_parse_response(handler.read()) except urllib2.HTTPError as he: raise except urllib2.URLError as ue: if isinstance(ue.reason, socket.timeout): raise FilerError(Filer.ETIMEOUT, "Connection timeout") else: raise except socket.timeout: raise FilerError(Filer.ETIMEOUT, "Connection timeout") except SSLError as sse: # The ssl library doesn't give a good way to find specific reason. # We are doing a string contains which is not ideal, but other than # throwing a generic error in this case there isn't much we can do # to be more specific. 
if "timed out" in str(sse).lower(): raise FilerError(Filer.ETIMEOUT, "Connection timeout (SSL)") else: raise FilerError(Filer.EUNKNOWN, "SSL error occurred (%s)", str(sse)) finally: if handler: handler.close() return rc class FilerError(Exception): """ Class represents a NetApp bad return code """ IGROUP_NOT_CONTAIN_GIVEN_INIT = 9007 IGROUP_ALREADY_HAS_INIT = 9008 NO_SUCH_IGROUP = 9003 # Using the name from NetApp SDK netapp_errno.h EVDISK_ERROR_VDISK_EXISTS = 9012 # LUN name already in use EVDISK_ERROR_VDISK_EXPORTED = 9013 # LUN is currently mapped EVDISK_ERROR_VDISK_NOT_ENABLED = 9014 # LUN is not online EVDISK_ERROR_VDISK_NOT_DISABLED = 9015 # LUN is not offline EVDISK_ERROR_NO_SUCH_LUNMAP = 9016 # LUN is already unmapped EVDISK_ERROR_INITGROUP_MAPS_EXIST = 9029 # LUN maps for this initiator # group exist EVDISK_ERROR_SIZE_TOO_LARGE = 9034 # LUN size too large. EVDISK_ERROR_NO_SUCH_VOLUME = 9036 # NetApp Volume not exists. EVDISK_ERROR_SIZE_TOO_SMALL = 9041 # Specified too small a size EVDISK_ERROR_SIZE_UNCHANGED = 9042 # requested size is the same. EVDISK_ERROR_INITGROUP_HAS_VDISK = 9023 # Already masked def __init__(self, errno, reason, *args, **kwargs): Exception.__init__(self, *args, **kwargs) self.errno = int(errno) self.reason = reason def to_list(v): """ The return values in hash form can either be a single hash item or a list of hash items, this code handles both to make callers always get a list. """ rc = [] if v is not None: if isinstance(v, list): rc = v else: rc.append(v) return rc class Filer(object): """ Class to handle NetApp API calls. Note: These are using lsm terminology. 
""" EUNKNOWN = 10 # Non-specific error ENAVOL_NAME_DUPE = 17 # Volume name collision ENOSPC = 28 # Out of space ETIMEOUT = 60 # Time-out EINVALID_ISCSI_NAME = 9006 # Invalid ISCSI IQN EDUPE_VOLUME_PATH = 9012 # Duplicate volume name ENO_SUCH_VOLUME = 9017 # lun not found ESIZE_TOO_LARGE = 9034 # Specified too large a size ENO_SUCH_FS = 9036 # FS not found EVOLUME_TOO_SMALL = 9041 # Specified too small a size EAPILICENSE = 13008 # Unlicensed API EFSDOESNOTEXIST = 13040 # FS does not exist EFSOFFLINE = 13042 # FS is offline. EFSNAMEINVALID = 13044 # FS Name invalid ENOSPACE = 13062 # Not enough space ESERVICENOTLICENSED = 13902 # Not licensed ECLONE_NAME_EXISTS = 14952 # Clone with same name exists ECLONE_LICENSE_EXPIRED = 14955 # Not licensed ECLONE_NOT_LICENSED = 14956 # Not licensed (LSM_VOL_PREFIX, LSM_INIT_PREFIX) = ('lsm_lun_container', 'lsm_init_') def _invoke(self, command, parameters=None): rc = netapp_filer(self.host, self.username, self.password, self.timeout, command, parameters, self.ssl) t = rc['netapp']['results']['attrib'] if t['status'] != 'passed': raise FilerError(t['errno'], t['reason']) return rc['netapp']['results'] def __init__(self, host, username, password, timeout, ssl=True): self.host = host self.username = username self.password = password self.timeout = timeout self.ssl = ssl def system_info(self): rc = self._invoke('system-get-info') return rc['system-info'] def validate(self): #TODO: Validate that everything we need to function is available? 
self._invoke('system-api-list') return None def disks(self): disks = self._invoke('disk-list-info') return disks['disk-details']['disk-detail-info'] def aggregates(self, aggr_name=None): """ Return a list of aggregates If aggr_name provided, return [na_aggr] """ if aggr_name: pools = self._invoke('aggr-list-info', {'aggregate': aggr_name}) else: pools = self._invoke('aggr-list-info') tmp = pools['aggregates']['aggr-info'] return to_list(tmp) def aggregate_volume_names(self, aggr_name): """ Return a list of volume names that are on an aggregate """ vol_names = [] rc = self._invoke('aggr-list-info', {'aggregate': aggr_name}) aggr = rc['aggregates']['aggr-info'] if aggr is not None and aggr['volumes'] is not None: vols = aggr['volumes']['contained-volume-info'] vol_names = [e['name'] for e in to_list(vols)] return vol_names def lun_build_name(self, volume_name, file_name): """ Given a volume name and file return full path" """ return '/vol/%s/%s' % (volume_name, file_name) def luns_get_specific(self, aggr, na_lun_name=None, na_volume_name=None): """ Return all logical units, or information about one or for all those on a volume name. """ rc = [] if na_lun_name is not None: luns = self._invoke('lun-list-info', {'path': na_lun_name}) elif na_volume_name is not None: luns = self._invoke('lun-list-info', {'volume-name': na_volume_name}) else: luns = self._invoke('lun-list-info') return to_list(luns['luns']['lun-info']) def _get_aggr_info(self): aggrs = self._invoke('aggr-list-info') tmp = to_list(aggrs['aggregates']['aggr-info']) return [x for x in tmp if x['volumes'] is not None] def luns_get_all(self): """ Return all lun-info """ try: return to_list(self._invoke('lun-list-info')['luns']['lun-info']) except TypeError: # No LUN found. 
return [] def lun_min_size(self): return self._invoke('lun-get-minsize', {'type': 'image'})['min-size'] def lun_create(self, full_path_name, size_bytes, flag_thin=False): """ Creates a lun If flag_thin set to True, will set 'space-reservation-enabled' as 'false' which means "create a LUN without any space being reserved". """ params = {'path': full_path_name, 'size': size_bytes} if flag_thin is True: params['space-reservation-enabled'] = 'false' self._invoke('lun-create-by-size', params) def lun_delete(self, lun_path): """ Deletes a lun given a lun path """ self._invoke('lun-destroy', {'path': lun_path}) def lun_resize(self, lun_path, size_bytes): """ Re-sizes a lun """ self._invoke('lun-resize', {'path': lun_path, 'size': size_bytes, 'force': 'true'}) def volume_resize(self, na_vol_name, size_diff_kb): """ Given a NetApp volume name and a size change in kb, re-size the NetApp volume. """ params = {'volume': na_vol_name} if size_diff_kb > 0: params['new-size'] = '+' + str(size_diff_kb) + 'k' else: params['new-size'] = str(size_diff_kb) + 'k' self._invoke('volume-size', params) return None def volumes(self, volume_name=None): """ Return a list of NetApp volumes """ if not volume_name: v = self._invoke('volume-list-info') else: v = self._invoke('volume-list-info', {'volume': volume_name}) t = v['volumes']['volume-info'] rc = to_list(t) return rc def volume_create(self, aggr_name, vol_name, size_in_bytes): """ Creates a volume given an aggr_name, volume name and size in bytes. """ params = {'containing-aggr-name': aggr_name, 'size': int(size_in_bytes * 1.30), #There must be a better way to account for this 'volume': vol_name} self._invoke('volume-create', params) #Turn off scheduled snapshots self._invoke('volume-set-option', {'volume': vol_name, 'option-name': 'nosnap', 'option-value': 'on', }) #Turn off auto export! 
self.nfs_export_remove(['/vol/' + vol_name]) def volume_clone(self, src_volume, dest_volume, snapshot=None): """ Clones a volume given a source volume name, destination volume name and optional backing snapshot. """ params = {'parent-volume': src_volume, 'volume': dest_volume} if snapshot: params['parent-snapshot'] = snapshot.name self._invoke('volume-clone-create', params) def volume_delete(self, vol_name): """ Deletes a volume and everything on it. """ online = False try: self._invoke('volume-offline', {'name': vol_name}) online = True except FilerError as f_error: if f_error.errno != Filer.EFSDOESNOTEXIST: raise try: self._invoke('volume-destroy', {'name': vol_name}) except FilerError as f_error: #If the volume was online, we will return it to same status # Store the original exception information exception_info = sys.exc_info() if online: try: self._invoke('volume-online', {'name': vol_name}) except FilerError: pass raise exception_info[1], None, exception_info[2] def volume_names(self): """ Return a list of volume names """ vols = self.volumes() return [v['name'] for v in vols] def clone(self, source_path, dest_path, backing_snapshot=None, ranges=None): """ Creates a file clone """ params = {'source-path': source_path} #You can have source == dest, but if you do you can only specify source if source_path != dest_path: params['destination-path'] = dest_path if backing_snapshot: raise FilerError(ErrorNumber.NO_SUPPORT, "Support for backing luns not implemented " "for this API version") #params['snapshot-name']= backing_snapshot if ranges: block_ranges = [] for r in ranges: values = {'block-count': r.block_count, 'destination-block-number': r.dest_block, 'source-block-number': r.src_block} block_ranges.append({'block-range': values}) params['block-ranges'] = block_ranges rc = self._invoke('clone-start', params) c_id = rc['clone-id'] while True: progress = self._invoke('clone-list-status', {'clone-id': c_id}) # According to the spec the output is optional, if not 
present # then we are done and good if 'status' in progress: progress = progress['status']['ops-info'] if progress['clone-state'] == 'failed': self._invoke('clone-clear', {'clone-id': c_id}) raise FilerError(progress['error'], progress['reason']) elif progress['clone-state'] == 'running' \ or progress['clone-state'] == 'fail exit': # State needs to transition to failed before we can # clear it! time.sleep(0.2) # Don't hog cpu elif progress['clone-state'] == 'completed': return else: raise FilerError(ErrorNumber.NO_SUPPORT, 'Unexpected state=' + progress['clone-state']) else: return def lun_online(self, lun_path): self._invoke('lun-online', {'path': lun_path}) def lun_offline(self, lun_path): self._invoke('lun-offline', {'path': lun_path}) def igroups(self, group_name=None): rc = [] if group_name: g = self._invoke('igroup-list-info', {'initiator-group-name': group_name}) else: g = self._invoke('igroup-list-info') if g['initiator-groups']: rc = to_list(g['initiator-groups']['initiator-group-info']) return rc def igroup_create(self, name, igroup_type): params = {'initiator-group-name': name, 'initiator-group-type': igroup_type} self._invoke('igroup-create', params) def igroup_delete(self, name): self._invoke('igroup-destroy', {'initiator-group-name': name}) @staticmethod def encode(password): rc4 = RC4.RC4() rc4.set_key("#u82fyi8S5\017pPemw") return hexlify(rc4.update(password)) def iscsi_initiator_add_auth(self, initiator, user_name, password, out_user, out_password): pw = self.encode(password) args = {'initiator': initiator} if user_name and len(user_name) and password and len(password): args.update({'user-name': user_name, 'password': pw, 'auth-type': "CHAP"}) if out_user and len(out_user) and \ out_password and len(out_password): args.update({'outbound-user-name': out_user, 'outbound-password': out_password}) else: args.update({'initiator': initiator, 'auth-type': "none"}) self._invoke('iscsi-initiator-add-auth', args) def igroup_add_initiator(self, ig, 
initiator): self._invoke('igroup-add', {'initiator-group-name': ig, 'initiator': initiator}) def igroup_del_initiator(self, ig, initiator): self._invoke('igroup-remove', {'initiator-group-name': ig, 'initiator': initiator, 'force': 'true'}) def lun_map(self, igroup, lun_path): self._invoke('lun-map', {'initiator-group': igroup, 'path': lun_path}) def lun_unmap(self, igroup, lun_path): self._invoke( 'lun-unmap', {'initiator-group': igroup, 'path': lun_path}) def lun_map_list_info(self, lun_path): initiator_groups = [] rc = self._invoke('lun-map-list-info', {'path': lun_path}) if rc['initiator-groups'] is not None: igi = to_list(rc['initiator-groups']) for i in igi: group_name = i['initiator-group-info']['initiator-group-name'] initiator_groups.append(self.igroups(group_name)[0]) return initiator_groups def lun_initiator_list_map_info(self, initiator_id, initiator_group_name): """ Given an initiator_id and initiator group name, return a list of lun-info """ luns = [] rc = self._invoke('lun-initiator-list-map-info', {'initiator': initiator_id}) if rc['lun-maps']: lun_name_list = to_list(rc['lun-maps']['lun-map-info']) #Get all the lun with information about aggr all_luns = self.luns_get_all() for l in lun_name_list: if l['initiator-group'] == initiator_group_name: for al in all_luns: if al['path'] == l['path']: luns.append(al) return luns def snapshots(self, volume_name): rc = [] args = {'target-type': 'volume', 'target-name': volume_name} ss = self._invoke('snapshot-list-info', args) if ss['snapshots']: rc = to_list(ss['snapshots']['snapshot-info']) return rc def snapshot_create(self, volume_name, snapshot_name): self._invoke('snapshot-create', {'volume': volume_name, 'snapshot': snapshot_name}) return [v for v in self.snapshots(volume_name) if v['name'] == snapshot_name][0] def snapshot_file_restore_num(self): """ Returns the number of executing file restore snapshots. 
""" rc = self._invoke('snapshot-restore-file-info') if 'sfsr-in-progress' in rc: return int(rc['sfsr-in-progress']) return 0 def snapshot_restore_volume(self, fs_name, snapshot_name): """ Restores all files on a volume """ params = {'snapshot': snapshot_name, 'volume': fs_name} self._invoke('snapshot-restore-volume', params) def snapshot_restore_file(self, snapshot_name, restore_path, restore_file): """ Restore a list of files """ params = {'snapshot': snapshot_name, 'path': restore_path} if restore_file: params['restore-path'] = restore_file self._invoke('snapshot-restore-file', params) def snapshot_delete(self, volume_name, snapshot_name): self._invoke('snapshot-delete', {'volume': volume_name, 'snapshot': snapshot_name}) def export_auth_types(self): rc = self._invoke('nfs-get-supported-sec-flavors') return [e['flavor'] for e in to_list(rc['sec-flavor']['sec-flavor-info'])] @staticmethod def _build_list(pylist, list_name, elem_name): """ Given a python list, build the appropriate dict that contains the list items so that it can be converted to xml to be sent on the wire. 
""" return [{list_name: {elem_name: l}} for l in pylist] @staticmethod def _build_export_fs_all(): return Filer._build_list( ['true'], 'exports-hostname-info', 'all-hosts') @staticmethod def _build_export_fs_list(hosts): if hosts[0] == '*': return Filer._build_export_fs_all() else: return Filer._build_list(hosts, 'exports-hostname-info', 'name') def _build_export_rules(self, volume_path, export_path, ro_list, rw_list, root_list, anonuid=None, sec_flavor=None): """ Common logic to build up the rules for nfs """ #One of the more complicated data structures to push down to the #controller rule = {'pathname': volume_path} if volume_path != export_path: rule['actual-pathname'] = volume_path rule['pathname'] = export_path rule['security-rules'] = {} rule['security-rules']['security-rule-info'] = {} r = rule['security-rules']['security-rule-info'] if len(ro_list): r['read-only'] = Filer._build_export_fs_list(ro_list) if len(rw_list): r['read-write'] = Filer._build_export_fs_list(rw_list) if len(root_list): r['root'] = Filer._build_export_fs_list(root_list) if anonuid: uid = long(anonuid) if uid != -1 and uid != 0xFFFFFFFFFFFFFFFF: r['anon'] = str(uid) if sec_flavor: r['sec-flavor'] = Filer._build_list( [sec_flavor], 'sec-flavor-info', 'flavor') return rule def nfs_export_fs2(self, volume_path, export_path, ro_list, rw_list, root_list, anonuid=None, sec_flavor=None): """ NFS export a volume. """ rule = self._build_export_rules( volume_path, export_path, ro_list, rw_list, root_list, anonuid, sec_flavor) params = {'persistent': 'true', 'rules': {'exports-rule-info-2': [rule]}, 'verbose': 'true'} self._invoke('nfs-exportfs-append-rules-2', params) def nfs_export_fs_modify2(self, volume_path, export_path, ro_list, rw_list, root_list, anonuid=None, sec_flavor=None): """ Modifies an existing rule. 
""" rule = self._build_export_rules( volume_path, export_path, ro_list, rw_list, root_list, anonuid, sec_flavor) params = { 'persistent': 'true', 'rule': {'exports-rule-info-2': [rule]}} self._invoke('nfs-exportfs-modify-rule-2', params) def nfs_export_remove(self, export_paths): """ Removes an existing export """ assert (type(export_paths) is list) paths = Filer._build_list(export_paths, 'pathname-info', 'name') self._invoke('nfs-exportfs-delete-rules', {'pathnames': paths, 'persistent': 'true'}) def nfs_exports(self): """ Returns a list of exports (in hash form) """ rc = [] exports = self._invoke('nfs-exportfs-list-rules') if 'rules' in exports and exports['rules']: rc = to_list(exports['rules']['exports-rule-info']) return rc def volume_children(self, volume): params = {'volume': volume} rc = self._invoke('volume-list-info', params) if 'clone-children' in rc['volumes']['volume-info']: tmp = rc['volumes']['volume-info']['clone-children'][ 'clone-child-info'] rc = [c['clone-child-name'] for c in to_list(tmp)] else: rc = None return rc def volume_split_clone(self, volume): self._invoke('volume-clone-split-start', {'volume': volume}) def volume_split_status(self): result = [] rc = self._invoke('volume-clone-split-status') if 'clone-split-details' in rc: tmp = rc['clone-split-details']['clone-split-detail-info'] result = [r['name'] for r in to_list(tmp)] return result def fcp_list(self): fcp_list = [] try: rc = self._invoke('fcp-adapter-list-info') if 'fcp-config-adapters' in rc: if 'fcp-config-adapter-info' in rc['fcp-config-adapters']: fc_config = rc['fcp-config-adapters'] adapters = fc_config['fcp-config-adapter-info'] for f in adapters: fcp_list.append(dict(addr=f['port-name'], adapter=f['adapter'])) except FilerError as na: if na.errno != Filer.EAPILICENSE: raise return fcp_list def iscsi_node_name(self): try: rc = self._invoke('iscsi-node-get-name') if 'node-name' in rc: return rc['node-name'] except FilerError as na: if na.errno != Filer.EAPILICENSE: raise 
return None def interface_get_infos(self): i_info = {} rc = self._invoke('net-ifconfig-get') if 'interface-config-info' in rc: i_config = rc['interface-config-info'] if 'interface-config-info' in i_config: tmp = to_list(i_config['interface-config-info']) for i in tmp: i_info[i['interface-name']] = i return i_info def iscsi_list(self): i_list = [] # Get interface information i_info = self.interface_get_infos() try: rc = self._invoke('iscsi-portal-list-info') if 'iscsi-portal-list-entries' in rc: portal_entries = rc['iscsi-portal-list-entries'] if 'iscsi-portal-list-entry-info' in portal_entries: tmp = portal_entries['iscsi-portal-list-entry-info'] portals = to_list(tmp) for p in portals: mac = i_info[p['interface-name']]['mac-address'] i_list.append(dict(interface=p['interface-name'], ip=p['ip-address'], port=p['ip-port'], mac=mac)) except FilerError as na: if na.errno != Filer.EAPILICENSE: raise return i_list if __name__ == '__main__': try: #TODO: Need some unit test code pass except FilerError as fe: print 'Errno=', fe.errno, 'reason=', fe.reason libstoragemgmt-1.2.3/plugin/ontap/ontap_lsmplugin0000775000175000017500000000232612537737032017254 00000000000000#!/usr/bin/env python2 # Copyright (C) 2011-2013 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . 
# # Author: tasleson import sys import syslog import traceback try: from lsm.plugin.ontap.ontap import Ontap from lsm import PluginRunner if __name__ == '__main__': PluginRunner(Ontap, sys.argv).run() except Exception: #This should be quite rare, but when it does happen this is pretty #key in understanding what happened, especially when it happens when #running from the daemon. msg = str(traceback.format_exc()) syslog.syslog(syslog.LOG_ERR, msg) sys.stderr.write(msg) sys.exit(1) libstoragemgmt-1.2.3/plugin/ontap/ontap.py0000664000175000017500000014642412537737032015616 00000000000000# Copyright (C) 2011-2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: tasleson # Gris Ge import os import urlparse import copy import na from lsm import (Volume, FileSystem, FsSnapshot, NfsExport, AccessGroup, System, Capabilities, Disk, Pool, IStorageAreaNetwork, INfs, LsmError, ErrorNumber, JobStatus, md5, VERSION, common_urllib2_error_handler, search_property, TargetPort) #Maps na to lsm, this is expected to expand over time. 
e_map = { na.Filer.ENOSPC: ErrorNumber.NOT_ENOUGH_SPACE, na.Filer.ENO_SUCH_VOLUME: ErrorNumber.NOT_FOUND_VOLUME, na.Filer.ESIZE_TOO_LARGE: ErrorNumber.NOT_ENOUGH_SPACE, na.Filer.ENOSPACE: ErrorNumber.NOT_ENOUGH_SPACE, na.Filer.ENO_SUCH_FS: ErrorNumber.NOT_FOUND_FS, na.Filer.EAPILICENSE: ErrorNumber.NOT_LICENSED, na.Filer.EFSDOESNOTEXIST: ErrorNumber.NOT_FOUND_FS, na.Filer.EFSOFFLINE: ErrorNumber.NO_SUPPORT_ONLINE_CHANGE, na.Filer.EFSNAMEINVALID: ErrorNumber.INVALID_ARGUMENT, na.Filer.ESERVICENOTLICENSED: ErrorNumber.NOT_LICENSED, na.Filer.ECLONE_LICENSE_EXPIRED: ErrorNumber.NOT_LICENSED, na.Filer.ECLONE_NOT_LICENSED: ErrorNumber.NOT_LICENSED, na.Filer.EINVALID_ISCSI_NAME: ErrorNumber.INVALID_ARGUMENT, na.Filer.ETIMEOUT: ErrorNumber.TIMEOUT, na.Filer.EUNKNOWN: ErrorNumber.PLUGIN_BUG, na.Filer.EDUPE_VOLUME_PATH: ErrorNumber.NAME_CONFLICT, na.Filer.ENAVOL_NAME_DUPE: ErrorNumber.NAME_CONFLICT, na.Filer.ECLONE_NAME_EXISTS: ErrorNumber.NAME_CONFLICT } def error_map(oe): """ Maps a ontap error code to a lsm error code. Returns a tuple containing error code and text. 
""" if oe.errno in e_map: return e_map[oe.errno], oe.reason else: return ErrorNumber.PLUGIN_BUG, \ oe.reason + " (vendor error code= " + str(oe.errno) + ")" def handle_ontap_errors(method): def na_wrapper(*args, **kwargs): try: return method(*args, **kwargs) except LsmError: raise except na.FilerError as oe: error_code, error_msg = error_map(oe) raise LsmError(error_code, error_msg) except Exception as e: common_urllib2_error_handler(e) return na_wrapper _INIT_TYPE_CONV = { 'iscsi': AccessGroup.INIT_TYPE_ISCSI_IQN, 'fcp': AccessGroup.INIT_TYPE_WWPN, 'mixed': AccessGroup.INIT_TYPE_ISCSI_WWPN_MIXED, } def _na_init_type_to_lsm(na_ag): if 'initiator-group-type' in na_ag: if na_ag['initiator-group-type'] in _INIT_TYPE_CONV.keys(): return _INIT_TYPE_CONV[na_ag['initiator-group-type']] else: return AccessGroup.INIT_TYPE_OTHER return AccessGroup.INIT_TYPE_UNKNOWN def _lsm_vol_to_na_vol_path(vol): return vol.id class Ontap(IStorageAreaNetwork, INfs): TMO_CONV = 1000.0 (SS_JOB, SPLIT_JOB) = ('ontap-ss-file-restore', 'ontap-clone-split') VOLUME_PREFIX = '/vol' NA_VOL_STATUS_TO_LSM = { 'offline': Pool.STATUS_STOPPED, 'online': Pool.STATUS_OK, 'restricted': Pool.STATUS_OTHER, 'unknown': Pool.STATUS_UNKNOWN, 'creating': Pool.STATUS_INITIALIZING, 'failed': Pool.STATUS_ERROR, 'partial': Pool.STATUS_ERROR, } NA_VOL_STATUS_TO_LSM_STATUS_INFO = { 'partial': 'all the disks in the volume are not available.', 'restricted': 'volume is restricted to protocol accesses', } # strip size: http://www.netapp.com/us/media/tr-3001.pdf _STRIP_SIZE = 4096 _OPT_IO_SIZE = 65536 def __init__(self): self.f = None self.sys_info = None @handle_ontap_errors def plugin_register(self, uri, password, timeout, flags=0): ssl = False u = urlparse.urlparse(uri) if u.scheme.lower() == 'ontap+ssl': ssl = True self.f = na.Filer(u.hostname, u.username, password, timeout / Ontap.TMO_CONV, ssl) #Smoke test i = self.f.system_info() #TODO Get real filer status self.sys_info = System(i['system-id'], i['system-name'], 
System.STATUS_OK, '') return self.f.validate() def time_out_set(self, ms, flags=0): self.f.timeout = int(ms / Ontap.TMO_CONV) def time_out_get(self, flags=0): return int(self.f.timeout * Ontap.TMO_CONV) def plugin_unregister(self, flags=0): pass @staticmethod def _create_vpd(sn): """ Construct the vpd83 for this lun """ return "60a98000" + ''.join(["%02x" % ord(x) for x in sn]) @staticmethod def _lsm_lun_name(path_name): return os.path.basename(path_name) def _lun(self, l): block_size = int(l['block-size']) num_blocks = int(l['size']) / block_size pool_id = "/".join(l['path'].split('/')[0:3]) vol_id = l['path'] vol_name = os.path.basename(vol_id) admin_state = Volume.ADMIN_STATE_ENABLED if l['online'] == 'false': admin_state = Volume.ADMIN_STATE_DISABLED return Volume(vol_id, vol_name, Ontap._create_vpd(l['serial-number']), block_size, num_blocks, admin_state, self.sys_info.id, pool_id) def _vol(self, v, pools=None): pool_name = v['containing-aggregate'] if pools is None: pools = self.pools() for p in pools: if p.name == pool_name: return FileSystem(v['uuid'], v['name'], int(v['size-total']), int(v['size-available']), p.id, self.sys_info.id) @staticmethod def _ss(s): #If we use the newer API we can use the uuid instead of this fake #md5 one return FsSnapshot(md5(s['name'] + s['access-time']), s['name'], s['access-time']) _NA_DISK_TYPE_TO_LSM = { 'ATA': Disk.TYPE_ATA, 'BSAS': Disk.TYPE_SATA, 'EATA': Disk.TYPE_ATA, 'FCAL': Disk.TYPE_FC, 'FSAS': Disk.TYPE_NL_SAS, 'LUN': Disk.TYPE_OTHER, 'MSATA': Disk.TYPE_SATA, 'SAS': Disk.TYPE_SAS, 'SATA': Disk.TYPE_SATA, 'SCSI': Disk.TYPE_SCSI, 'SSD': Disk.TYPE_SSD, 'XATA': Disk.TYPE_ATA, 'XSAS': Disk.TYPE_SAS, 'unknown': Disk.TYPE_UNKNOWN, } @staticmethod def _disk_type_of(na_disk): """ Convert na_disk['effective-disk-type'] to LSM disk type. 
""" na_disk_type = na_disk['effective-disk-type'] if na_disk_type in Ontap._NA_DISK_TYPE_TO_LSM.keys(): return Ontap._NA_DISK_TYPE_TO_LSM[na_disk_type] return Disk.TYPE_UNKNOWN @staticmethod def _disk_id(na_disk): """ The md5sum of na_disk['disk-uid'] """ return md5(na_disk['disk-uid']) @staticmethod def _status_of_na_disk(na_disk): """ Retrieve Disk.status from NetApp ONTAP disk-detail-info. TODO: API document does not provide enough explaination. Need lab test to verify. """ status = 0 if 'raid-state' in na_disk: rs = na_disk['raid-state'] if rs == "broken": if na_disk['broken-details'] == 'admin removed' or \ na_disk['broken-details'] == 'admin failed': status |= Disk.STATUS_REMOVED elif na_disk['broken-details'] == 'admin testing': status |= Disk.STATUS_STOPPED | \ Disk.STATUS_MAINTENANCE_MODE else: status |= Disk.STATUS_ERROR elif rs == "unknown": status |= Disk.STATUS_UNKNOWN elif rs == 'zeroing': status |= Disk.STATUS_INITIALIZING | Disk.STATUS_SPARE_DISK elif rs == 'reconstructing' or rs == 'copy': # "reconstructing' should be a pool status, not disk status. # disk under reconstructing should be considered as OK. status |= Disk.STATUS_OK | Disk.STATUS_RECONSTRUCT elif rs == 'spare': if 'is-zeroed' in na_disk and na_disk['is-zeroed'] == 'true': status |= Disk.STATUS_OK | Disk.STATUS_SPARE_DISK else: # If spare disk is not zerored, it will be automaticlly # zeroed before assigned to aggregate. # Hence we consider non-zeroed spare disks as stopped # spare disks. 
status |= Disk.STATUS_STOPPED | Disk.STATUS_SPARE_DISK elif rs == 'present': status |= Disk.STATUS_OK elif rs == 'partner': # Before we have good way to connect two controller, # we have to mark partner disk as OTHER return Disk.STATUS_OTHER if 'is-prefailed' in na_disk and na_disk['is-prefailed'] == 'true': status |= Disk.STATUS_STOPPING if 'is-offline' in na_disk and na_disk['is-offline'] == 'true': status |= Disk.STATUS_ERROR if 'aggregate' not in na_disk: # All free disks are automatically marked as spare disks. They # could easily convert to data or parity disk without any # explicit command. status |= Disk.STATUS_FREE if status == 0: status = Disk.STATUS_UNKNOWN return status @staticmethod def _status_info_of_na_disk(na_disk): """ Provide more explainaion in Disk.status_info. TODO: API document does not provide enough explaination. Need lab test to verify. """ status_info = '' if 'raid-state' in na_disk: rs = na_disk['raid-state'] if rs == 'reconstructing': status_info = "Reconstruction progress: %s%%" %\ str(na_disk['reconstruction-percent']) if 'broken-details' in na_disk: status_info = na_disk['broken-details'] return status_info def _disk(self, d, flag): status = Ontap._status_of_na_disk(d) return Disk(self._disk_id(d), d['name'], Ontap._disk_type_of(d), int(d['bytes-per-sector']), int(d['physical-blocks']), status, self.sys_info.id) @handle_ontap_errors def volumes(self, search_key=None, search_value=None, flags=0): luns = self.f.luns_get_all() return search_property( [self._lun(l) for l in luns], search_key, search_value) # This is based on NetApp ONTAP Manual pages: # https://library.netapp.com/ecmdocs/ECMP1196890/html/man1/na_aggr.1.html _AGGR_RAID_STATUS_CONV = { 'normal': Pool.STATUS_OK, 'verifying': Pool.STATUS_OK | Pool.STATUS_VERIFYING, 'copying': Pool.STATUS_INITIALIZING, 'ironing': Pool.STATUS_OK | Pool.STATUS_VERIFYING, 'resyncing': Pool.STATUS_OK | Pool.STATUS_DEGRADED | Pool.STATUS_RECONSTRUCTING, 'mirror degraded': Pool.STATUS_OK | 
Pool.STATUS_DEGRADED, 'needs check': Pool.STATUS_ERROR, 'initializing': Pool.STATUS_INITIALIZING, 'growing': Pool.STATUS_OK | Pool.STATUS_GROWING, 'partial': Pool.STATUS_ERROR, 'noparity': Pool.STATUS_OTHER, 'degraded': Pool.STATUS_OK | Pool.STATUS_DEGRADED, 'reconstruct': Pool.STATUS_OK | Pool.STATUS_DEGRADED | Pool.STATUS_RECONSTRUCTING, 'out-of-date': Pool.STATUS_OTHER, 'foreign': Pool.STATUS_OTHER, } _AGGR_RAID_ST_INFO_CONV = { 'copying': 'The aggregate is currently the target aggregate of an' 'active aggr copy operation. ', 'invalid': 'The aggregate does not contain any volume and no volume' 'can be added to it. Typically this happens after an ' 'aborted aggregate copy operation. ', 'needs check': 'A WAFL consistency check needs to be performed on ' 'the aggregate. ', 'partial': 'Two or more disks are missing.', # noparity, no document found. 'noparity': 'NetApp ONTAP mark this aggregate as "noparity". ', # out-of-data: no document found. 'out-of-date': 'NetApp ONTAP mark this aggregate as "out-of-date". ', 'foreign': "The disks that the aggregate contains were moved to the" "current node from another node. " } @staticmethod def _status_of_na_aggr(na_aggr): """ Use aggr-info['state'] and ['raid-status'] for Pool.status and status_info. Return (status, status_info) """ status = 0 status_info = '' na_aggr_raid_status_list = list( x.strip() for x in na_aggr['raid-status'].split(',')) for na_aggr_raid_status in na_aggr_raid_status_list: if na_aggr_raid_status in Ontap._AGGR_RAID_STATUS_CONV.keys(): status |= Ontap._AGGR_RAID_STATUS_CONV[na_aggr_raid_status] if na_aggr_raid_status in Ontap._AGGR_RAID_ST_INFO_CONV.keys(): status_info += \ Ontap._AGGR_RAID_ST_INFO_CONV[na_aggr_raid_status] # Now check na_aggr['state'] na_aggr_state = na_aggr['state'].strip() if na_aggr_state == 'online' or na_aggr_state == 'creating': pass elif na_aggr_state == 'offline': # When aggr is marked as offline, the restruction is stoped. 
if status & Pool.STATUS_RECONSTRUCTING: status -= Pool.STATUS_RECONSTRUCTING status |= Pool.STATUS_DEGRADED status |= Pool.STATUS_STOPPED else: status_info += "%s " % na_aggr_state if status == 0: status = Pool.STATUS_OK return status, status_info def _pool_from_na_aggr(self, na_aggr, flags): pool_id = na_aggr['name'] pool_name = na_aggr['name'] total_space = int(na_aggr['size-total']) free_space = int(na_aggr['size-available']) system_id = self.sys_info.id (status, status_info) = self._status_of_na_aggr(na_aggr) element_type = (Pool.ELEMENT_TYPE_POOL | Pool.ELEMENT_TYPE_FS) # The system aggregate can be used to create both FS and volumes, but # you can't take it offline or delete it. if pool_name == 'aggr0': element_type = element_type | Pool.ELEMENT_TYPE_SYS_RESERVED return Pool(pool_id, pool_name, element_type, 0, total_space, free_space, status, status_info, system_id) @staticmethod def _status_info_of_na_vol(na_vol): na_vol_state = na_vol['state'] if na_vol_state in Ontap.NA_VOL_STATUS_TO_LSM_STATUS_INFO.keys(): return Ontap.NA_VOL_STATUS_TO_LSM_STATUS_INFO[na_vol_state] return '' @staticmethod def _pool_id_of_na_vol_name(na_vol_name): return "%s/%s" % (Ontap.VOLUME_PREFIX, na_vol_name) def _pool_from_na_vol(self, na_vol, na_aggrs, flags): element_type = Pool.ELEMENT_TYPE_VOLUME # Thin provisioning is controled by: # 1. NetApp Volume level: # 'guarantee' option and 'fractional_reserve' option. # If 'guarantee' is 'file', 'fractional_reserve' is forced to # be 100, we can create Thin LUN and full allocated LUN. # If 'guarantee' is 'volume' and 'fractional_reserve' is 100, we # can create full LUN. # If 'guarantee' is 'volume' and 'fractional_reserve' is less # than 100, we can only create thin LUN. # If 'guarantee' is 'none', we can only create thin LUN. # 2. NetApp LUN level: # If option 'reservation' is enabled, it's a full allocated LUN # when parent NetApp volume allowed. 
# If option 'reservation' is disabled, it's a thin LUN if # parent NetApp volume allowed. if 'space-reserve' in na_vol and \ 'space-reserve-enabled' in na_vol and \ 'reserve' in na_vol and \ na_vol['space-reserve-enabled'] == 'true': # 'space-reserve' and 'space-reserve-enabled' might not appear if # the flexible volume is restricted or offline. if na_vol['space-reserve'] == 'file': # space-reserve: 'file' means only LUN or file marked as # 'Space Reservation: enabled' will be reserve all space. element_type |= Pool.ELEMENT_TYPE_VOLUME_THIN element_type |= Pool.ELEMENT_TYPE_VOLUME_FULL elif na_vol['space-reserve'] == 'volume': # space-reserve: 'volume' means only LUN or file marked as # 'Space Reservation: enabled' will be reserve all space. if na_vol['reserve'] == na_vol['reserve-required']: # When 'reserve' == 'reserve-required' it means option # 'fractional_reserve' is set to 100, only with that we # can create full alocated LUN. element_type |= Pool.ELEMENT_TYPE_VOLUME_FULL else: element_type |= Pool.ELEMENT_TYPE_VOLUME_THIN elif na_vol['space-reserve'] == 'none': element_type |= Pool.ELEMENT_TYPE_VOLUME_THIN pool_name = na_vol['name'] pool_id = self._pool_id_of_na_vol_name(na_vol['name']) total_space = int(na_vol['size-total']) free_space = int(na_vol['size-available']) system_id = self.sys_info.id status = Pool.STATUS_UNKNOWN status_info = '' if 'containing-aggregate' in na_vol: for na_aggr in na_aggrs: if na_aggr['name'] == na_vol['containing-aggregate']: status = self._status_of_na_aggr(na_aggr)[0] if not (status & Pool.STATUS_OK): status_info = "Parrent pool '%s'" \ % na_aggr['name'] break if status & Pool.STATUS_OK and na_vol['state'] == 'offline': status = Pool.STATUS_STOPPED status_info = 'Disabled by admin' # This volume should be noted that it is reserved for system # and thus cannot be removed. 
if pool_name == '/vol/vol0': element_type |= Pool.ELEMENT_TYPE_SYS_RESERVED return Pool(pool_id, pool_name, element_type, 0, total_space, free_space, status, status_info, system_id) @handle_ontap_errors def capabilities(self, system, flags=0): cap = Capabilities() cap.set(Capabilities.VOLUMES) cap.set(Capabilities.VOLUME_CREATE) cap.set(Capabilities.VOLUME_RESIZE) cap.set(Capabilities.VOLUME_REPLICATE) cap.set(Capabilities.VOLUME_REPLICATE_CLONE) cap.set(Capabilities.VOLUME_COPY_RANGE_BLOCK_SIZE) cap.set(Capabilities.VOLUME_COPY_RANGE) cap.set(Capabilities.VOLUME_COPY_RANGE_CLONE) cap.set(Capabilities.VOLUME_DELETE) cap.set(Capabilities.VOLUME_ENABLE) cap.set(Capabilities.VOLUME_DISABLE) cap.set(Capabilities.VOLUME_ISCSI_CHAP_AUTHENTICATION) cap.set(Capabilities.VOLUME_MASK) cap.set(Capabilities.VOLUME_UNMASK) cap.set(Capabilities.ACCESS_GROUPS) cap.set(Capabilities.ACCESS_GROUP_CREATE_WWPN) cap.set(Capabilities.ACCESS_GROUP_CREATE_ISCSI_IQN) cap.set(Capabilities.ACCESS_GROUP_DELETE) cap.set(Capabilities.ACCESS_GROUP_INITIATOR_ADD_WWPN) cap.set(Capabilities.ACCESS_GROUP_INITIATOR_ADD_ISCSI_IQN) cap.set(Capabilities.ACCESS_GROUP_INITIATOR_DELETE) cap.set(Capabilities.VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP) cap.set(Capabilities.ACCESS_GROUPS_GRANTED_TO_VOLUME) cap.set(Capabilities.VOLUME_CHILD_DEPENDENCY) cap.set(Capabilities.VOLUME_CHILD_DEPENDENCY_RM) cap.set(Capabilities.FS) cap.set(Capabilities.FS_DELETE) cap.set(Capabilities.FS_RESIZE) cap.set(Capabilities.FS_CREATE) cap.set(Capabilities.FS_CLONE) cap.set(Capabilities.FILE_CLONE) cap.set(Capabilities.FS_SNAPSHOTS) cap.set(Capabilities.FS_SNAPSHOT_CREATE) cap.set(Capabilities.FS_SNAPSHOT_DELETE) cap.set(Capabilities.FS_SNAPSHOT_RESTORE) cap.set(Capabilities.FS_CHILD_DEPENDENCY) cap.set(Capabilities.FS_CHILD_DEPENDENCY_RM) cap.set(Capabilities.EXPORT_AUTH) cap.set(Capabilities.EXPORTS) cap.set(Capabilities.EXPORT_FS) cap.set(Capabilities.EXPORT_REMOVE) cap.set(Capabilities.EXPORT_CUSTOM_PATH) 
cap.set(Capabilities.TARGET_PORTS) cap.set(Capabilities.DISKS) cap.set(Capabilities.VOLUME_RAID_INFO) cap.set(Capabilities.POOL_MEMBER_INFO) return cap @handle_ontap_errors def plugin_info(self, flags=0): return "NetApp Filer support", VERSION @handle_ontap_errors def disks(self, search_key=None, search_value=None, flags=0): disks = self.f.disks() return search_property( [self._disk(d, flags) for d in disks], search_key, search_value) @handle_ontap_errors def pools(self, search_key=None, search_value=None, flags=0): pools = [] na_aggrs = self.f.aggregates() for na_aggr in na_aggrs: pools.extend([self._pool_from_na_aggr(na_aggr, flags)]) na_vols = self.f.volumes() for na_vol in na_vols: pools.extend([self._pool_from_na_vol(na_vol, na_aggrs, flags)]) return search_property(pools, search_key, search_value) @handle_ontap_errors def systems(self, flags=0): return [self.sys_info] def _get_volume(self, vol_name, pool_id): return self._lun(self.f.luns_get_specific(pool_id, vol_name, None)[0]) @handle_ontap_errors def volume_create(self, pool, volume_name, size_bytes, provisioning, flags=0): if not pool.element_type & Pool.ELEMENT_TYPE_VOLUME: raise LsmError(ErrorNumber.INVALID_ARGUMENT, "Pool not suitable for creating volumes") # Even pool is full or thin only pool, we still allow user to # create full or thin LUN in case that's what they intend to do so. # TODO: allow user to query provising status of certain LUN. We can # use THIN(not effective) or FULL(not effective) to indicate # pool setting not allow thin/full LUN yet, user can change pool # setting. # Wise user can check pool.element_type before creating full or thin # volume. 
flag_thin = False if provisioning == Volume.PROVISION_THIN: flag_thin = True na_vol_name = pool.name lun_name = self.f.lun_build_name(na_vol_name, volume_name) try: self.f.lun_create(lun_name, size_bytes, flag_thin) except na.FilerError as fe: if fe.errno == na.FilerError.EVDISK_ERROR_SIZE_TOO_LARGE: raise LsmError( ErrorNumber.NOT_ENOUGH_SPACE, "No enough requested free size in pool") elif fe.errno == na.FilerError.EVDISK_ERROR_VDISK_EXISTS: raise LsmError( ErrorNumber.NAME_CONFLICT, "Requested volume name is already used by other volume") elif fe.errno == na.FilerError.EVDISK_ERROR_SIZE_TOO_SMALL: # Size too small should not be raised. By API defination, # we should create a LUN with mimun size. min_size = self.f.lun_min_size() return self.volume_create( pool, volume_name, min_size, provisioning, flags) elif fe.errno == na.FilerError.EVDISK_ERROR_NO_SUCH_VOLUME: # When NetApp volume is offline, we will get this error also. self._check_na_volume(na_vol_name) else: raise #Get the information about the newly created LUN return None, self._get_volume(lun_name, pool.id) @staticmethod def _vol_to_na_volume_name(volume): return os.path.dirname(_lsm_vol_to_na_vol_path(volume))[5:] @handle_ontap_errors def volume_delete(self, volume, flags=0): try: self.f.lun_delete(_lsm_vol_to_na_vol_path(volume)) except na.FilerError as f_error: # We don't use handle_ontap_errors which use netapp # error message which is not suitable for LSM user. 
if f_error.errno == na.FilerError.EVDISK_ERROR_VDISK_EXPORTED: raise LsmError(ErrorNumber.IS_MASKED, "Volume is masked to access group") raise return None @staticmethod def _size_kb_padded(size_bytes): return int((size_bytes / 1024) * 1.3) @handle_ontap_errors def volume_resize(self, volume, new_size_bytes, flags=0): try: self.f.lun_resize(_lsm_vol_to_na_vol_path(volume), new_size_bytes) except na.FilerError as fe: if fe.errno == na.FilerError.EVDISK_ERROR_SIZE_TOO_SMALL: min_size = self.f.lun_min_size() try: self.f.lun_resize(_lsm_vol_to_na_vol_path(volume), min_size) except na.FilerError as fe: if fe.errno == na.FilerError.EVDISK_ERROR_SIZE_UNCHANGED: # As requested size is not the one we are send to # self.f.lun_resize(), we should silently pass. pass else: raise elif fe.errno == na.FilerError.EVDISK_ERROR_SIZE_UNCHANGED: raise LsmError(ErrorNumber.NO_STATE_CHANGE, "Requested size is the same as current " "volume size") else: raise return None, self._get_volume(_lsm_vol_to_na_vol_path(volume), volume.pool_id) def _check_na_volume(self, na_vol_name): na_vols = self.f.volumes(volume_name=na_vol_name) if len(na_vols) == 0: raise LsmError(ErrorNumber.NOT_FOUND_POOL, "Pool not found") elif len(na_vols) == 1: # NetApp Volume is disabled. if na_vols[0]['state'] == 'offline': raise LsmError(ErrorNumber.POOL_NOT_READY, "Pool not ready for volume creation") else: raise LsmError(ErrorNumber.PLUGIN_BUG, "volume_create(): " "Got 2 or more na_vols: %s" % na_vols) def _volume_on_aggr(self, pool, volume): search = Ontap._vol_to_na_volume_name(volume) contained_volumes = self.f.aggregate_volume_names(pool.name) return search in contained_volumes @handle_ontap_errors def volume_replicate(self, pool, rep_type, volume_src, name, flags=0): #At the moment we are only supporting space efficient writeable #logical units. Add support for the others later. 
if rep_type != Volume.REPLICATE_CLONE: raise LsmError(ErrorNumber.NO_SUPPORT, "rep_type not supported") #Check to see if our volume is on a pool that was passed in or that #the pool itself is None if pool is None or self._volume_on_aggr(pool, volume_src): #Thin provision copy the logical unit dest = os.path.dirname(_lsm_vol_to_na_vol_path(volume_src)) + '/' \ + name self.f.clone(_lsm_vol_to_na_vol_path(volume_src), dest) return None, self._get_volume(dest, volume_src.pool_id) else: #TODO Need to get instructions on how to provide this #functionality raise LsmError(ErrorNumber.NO_SUPPORT, "Unable to replicate volume to different pool") @handle_ontap_errors def volume_replicate_range_block_size(self, system, flags=0): return 4096 @handle_ontap_errors def volume_replicate_range(self, rep_type, volume_src, volume_dest, ranges, flags=0): if rep_type != Volume.REPLICATE_CLONE: raise LsmError(ErrorNumber.NO_SUPPORT, "rep_type not supported") self.f.clone(_lsm_vol_to_na_vol_path(volume_src), _lsm_vol_to_na_vol_path(volume_dest), None, ranges) @handle_ontap_errors def volume_enable(self, volume, flags=0): try: return self.f.lun_online(_lsm_vol_to_na_vol_path(volume)) except na.FilerError as fe: if fe.errno == na.FilerError.EVDISK_ERROR_VDISK_NOT_DISABLED: raise LsmError(ErrorNumber.NO_STATE_CHANGE, "Volume is already enabled") raise @handle_ontap_errors def volume_disable(self, volume, flags=0): try: return self.f.lun_offline(_lsm_vol_to_na_vol_path(volume)) except na.FilerError as fe: if fe.errno == na.FilerError.EVDISK_ERROR_VDISK_NOT_ENABLED: raise LsmError(ErrorNumber.NO_STATE_CHANGE, "Volume is already disabled") raise @handle_ontap_errors def volume_mask(self, access_group, volume, flags=0): igroups = self.f.igroups(group_name=access_group.name) if len(igroups) != 1: raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP, "AccessGroup %s(%d) not found" % (access_group.name, access_group.id)) cur_init_ids = Ontap._initiators_in_group(igroups[0]) if len(cur_init_ids) == 0: 
raise LsmError( ErrorNumber.EMPTY_ACCESS_GROUP, "Refuse to do volume masking against empty access group") try: self.f.lun_map(access_group.name, _lsm_vol_to_na_vol_path(volume)) except na.FilerError as fe: if fe.errno == na.FilerError.EVDISK_ERROR_INITGROUP_HAS_VDISK: raise LsmError( ErrorNumber.NO_STATE_CHANGE, "Volume is already masked to requested access group") else: raise return None @handle_ontap_errors def volume_unmask(self, access_group, volume, flags=0): try: self.f.lun_unmap( access_group.name, _lsm_vol_to_na_vol_path(volume)) except na.FilerError as filer_error: if filer_error.errno == na.FilerError.EVDISK_ERROR_NO_SUCH_LUNMAP: raise LsmError( ErrorNumber.NO_STATE_CHANGE, "Volume is not masked to requested access group") else: raise return None @staticmethod def _initiators_in_group(g): rc = [] if g: if 'initiators' in g and g['initiators'] is not None: initiators = na.to_list(g['initiators']['initiator-info']) for i in initiators: rc.append(i['initiator-name']) return rc def _access_group(self, g): name = g['initiator-group-name'] if 'initiator-group-uuid' in g: ag_id = g['initiator-group-uuid'] else: ag_id = md5(name) return AccessGroup(ag_id, name, Ontap._initiators_in_group(g), _na_init_type_to_lsm(g), self.sys_info.id) @handle_ontap_errors def access_groups(self, search_key=None, search_value=None, flags=0): groups = self.f.igroups() return search_property( [self._access_group(g) for g in groups], search_key, search_value) @handle_ontap_errors def access_group_create(self, name, init_id, init_type, system, flags=0): if self.sys_info.id != system.id: raise LsmError(ErrorNumber.NOT_FOUND_SYSTEM, "System %s not found" % system.id) # NetApp sometimes(real hardware 8.0.2 and simulator 8.1.1) does not # raise error for initiator conflict. 
# # Precheck for initiator conflict cur_lsm_groups = self.access_groups() for cur_lsm_group in cur_lsm_groups: if cur_lsm_group.name == name: raise LsmError( ErrorNumber.NAME_CONFLICT, "Requested access group name is already used by other " "access group") if init_id in cur_lsm_group.init_ids: raise LsmError( ErrorNumber.EXISTS_INITIATOR, "Requested initiator is already used by other " "access group") if init_type == AccessGroup.INIT_TYPE_ISCSI_IQN: self.f.igroup_create(name, 'iscsi') elif init_type == AccessGroup.INIT_TYPE_WWPN: self.f.igroup_create(name, 'fcp') else: raise LsmError(ErrorNumber.NO_SUPPORT, "ONTAP only support iSCSI and FC/FCoE, but got " "init_type: %d" % init_type) self.f.igroup_add_initiator(name, init_id) groups = self.access_groups() for g in groups: if g.name == name: return g raise LsmError(ErrorNumber.PLUGIN_BUG, "access_group_create(): Unable to find access group " "%s just created!" % name) @handle_ontap_errors def access_group_delete(self, access_group, flags=0): try: return self.f.igroup_delete(access_group.name) except na.FilerError as f_error: if f_error.errno == \ na.FilerError.EVDISK_ERROR_INITGROUP_MAPS_EXIST: raise LsmError(ErrorNumber.IS_MASKED, "Access Group has volume masked") raise @handle_ontap_errors def access_group_initiator_add(self, access_group, init_id, init_type, flags=0): try: self.f.igroup_add_initiator(access_group.name, init_id) except na.FilerError as oe: if oe.errno == na.FilerError.IGROUP_ALREADY_HAS_INIT: return copy.deepcopy(access_group) elif oe.errno == na.FilerError.NO_SUCH_IGROUP: raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP, "AccessGroup %s(%d) not found" % (access_group.name, access_group.id)) else: raise na_ags = self.f.igroups(access_group.name) if len(na_ags) != 1: raise LsmError(ErrorNumber.PLUGIN_BUG, "access_group_initiator_add(): Got unexpected" "(not 1) count of na_ag: %s" % na_ags) return self._access_group(na_ags[0]) @handle_ontap_errors def access_group_initiator_delete(self, 
access_group, init_id, init_type, flags=0): igroups = self.f.igroups(group_name=access_group.name) if len(igroups) != 1: raise LsmError(ErrorNumber.NOT_FOUND_ACCESS_GROUP, "AccessGroup %s(%d) not found" % (access_group.name, access_group.id)) cur_init_ids = Ontap._initiators_in_group(igroups[0]) if init_id not in cur_init_ids: raise LsmError( ErrorNumber.NO_STATE_CHANGE, "Initiator %s does not exist in access group %s" % (init_id, access_group.name)) if len(cur_init_ids) == 1: raise LsmError( ErrorNumber.LAST_INIT_IN_ACCESS_GROUP, "Refuse to remove last initiator from access group") self.f.igroup_del_initiator(access_group.name, init_id) na_ags = self.f.igroups(access_group.name) if len(na_ags) != 1: raise LsmError(ErrorNumber.PLUGIN_BUG, "access_group_initiator_add(): Got unexpected" "(not 1) count of na_ag: %s" % na_ags) return self._access_group(na_ags[0]) @handle_ontap_errors def volumes_accessible_by_access_group(self, access_group, flags=0): rc = [] if len(access_group.init_ids): luns = self.f.lun_initiator_list_map_info(access_group.init_ids[0], access_group.name) rc = [self._lun(l) for l in luns] return rc @handle_ontap_errors def access_groups_granted_to_volume(self, volume, flags=0): groups = self.f.lun_map_list_info(_lsm_vol_to_na_vol_path(volume)) return [self._access_group(g) for g in groups] @handle_ontap_errors def iscsi_chap_auth(self, init_id, in_user, in_password, out_user, out_password, flags=0): if out_user and out_password and \ (in_user is None or in_password is None): raise LsmError(ErrorNumber.INVALID_ARGUMENT, "out_user and out_password only supported if " "inbound is supplied") self.f.iscsi_initiator_add_auth(init_id, in_user, in_password, out_user, out_password) @staticmethod def _rpercent(total, current): p = 1 - (current / float(total)) p = min(int(100 * p), 100) return p def _restore_file_status(self, num): running = self.f.snapshot_file_restore_num() if running: running = min(num, running) return JobStatus.INPROGRESS, 
Ontap._rpercent(num, running), None return JobStatus.COMPLETE, 100, None def _clone_split_status(self, volumes): vols = volumes.split(',') current = len(vols) #It doesn't appear that we have a good percentage #indicator from the clone split status... running = self.f.volume_split_status() for v in vols: if v not in running: current -= 1 if not running: return JobStatus.COMPLETE, 100, None else: return JobStatus.INPROGRESS, \ Ontap._rpercent(len(vols), current), None @handle_ontap_errors def job_status(self, job_id, flags=0): if job_id is None and '@' not in job_id: raise LsmError(ErrorNumber.INVALID_ARGUMENT, "Invalid job, missing @") job = job_id.split('@', 2) if job[0] == Ontap.SS_JOB: return self._restore_file_status(int(job[1])) elif job[0] == Ontap.SPLIT_JOB: return self._clone_split_status(job[1]) raise LsmError(ErrorNumber.INVALID_ARGUMENT, "Invalid job") @handle_ontap_errors def job_free(self, job_id, flags=0): return None @handle_ontap_errors def fs(self, search_key=None, search_value=None, flags=0): volumes = self.f.volumes() pools = self.pools() return search_property( [self._vol(v, pools) for v in volumes], search_key, search_value) @handle_ontap_errors def fs_delete(self, fs, flags=0): self.f.volume_delete(fs.name) @handle_ontap_errors def fs_resize(self, fs, new_size_bytes, flags=0): diff = new_size_bytes - fs.total_space diff = Ontap._size_kb_padded(diff) self.f.volume_resize(fs.name, diff) return None, self._vol(self.f.volumes(fs.name)[0]) @handle_ontap_errors def fs_create(self, pool, name, size_bytes, flags=0): self.f.volume_create(pool.name, name, size_bytes) return None, self._vol(self.f.volumes(name)[0]) @handle_ontap_errors def fs_clone(self, src_fs, dest_fs_name, snapshot=None, flags=0): self.f.volume_clone(src_fs.name, dest_fs_name, snapshot) return None, self._vol(self.f.volumes(dest_fs_name)[0]) @staticmethod def build_name(volume_name, relative_name): return "/vol/%s/%s" % (volume_name, relative_name) @handle_ontap_errors def 
fs_file_clone(self, fs, src_file_name, dest_file_name, snapshot=None, flags=0): full_src = Ontap.build_name(fs.name, src_file_name) full_dest = Ontap.build_name(fs.name, dest_file_name) ss = None if snapshot: ss = snapshot.name self.f.clone(full_src, full_dest, ss) return None @handle_ontap_errors def fs_snapshots(self, fs, flags=0): snapshots = self.f.snapshots(fs.name) return [Ontap._ss(s) for s in snapshots] @handle_ontap_errors def fs_snapshot_create(self, fs, snapshot_name, flags=0): #We can't do files, so we will do them all snap = self.f.snapshot_create(fs.name, snapshot_name) return None, Ontap._ss(snap) @handle_ontap_errors def fs_snapshot_delete(self, fs, snapshot, flags=0): self.f.snapshot_delete(fs.name, snapshot.name) def _ss_restore_files(self, volume_name, snapshot_name, files, restore_files): for i in range(len(files)): src = Ontap.build_name(volume_name, files[i]) dest = None if restore_files and len(restore_files): dest = Ontap.build_name(volume_name, restore_files[i]) self.f.snapshot_restore_file(snapshot_name, src, dest) @handle_ontap_errors def fs_snapshot_restore(self, fs, snapshot, files, restore_files, all_files=False, flags=0): """ Restores a FS or files on a FS. Note: Restoring an individual file is a O(n) operation, i.e. time it takes to restore a file depends on the file size. Reverting an entire FS is O(1). Try to avoid restoring individual files from a snapshot. 
""" if files is None and all_files: self.f.snapshot_restore_volume(fs.name, snapshot.name) return None elif files: if restore_files and len(files) != len(restore_files): raise LsmError(ErrorNumber.INVALID_ARGUMENT, "num files != num restore_files") self._ss_restore_files(fs.name, snapshot.name, files, restore_files) return "%s@%d" % (Ontap.SS_JOB, len(files)) else: raise LsmError(ErrorNumber.INVALID_ARGUMENT, "Invalid parameter combination") @handle_ontap_errors def export_auth(self, flags=0): """ Returns the types of authentication that are available for NFS """ return self.f.export_auth_types() @staticmethod def _get_group(access_group, e): rc = [] if access_group in e: for r in na.to_list(e[access_group]['exports-hostname-info']): if 'all-hosts' in r: if r['all-hosts'] == 'true': rc.append('*') else: rc.append(r['name']) return rc @staticmethod def _get_value(key, e): if key in e: return e[key] else: return None @staticmethod def _get_volume_id(volumes, vol_name): for v in volumes: if v.name == vol_name: return v.id raise RuntimeError("Volume not found in volumes:" + ":".join(volumes) + " " + vol_name) @staticmethod def _get_volume_from_path(path): #Volume paths have the form /vol// return path[5:].split('/')[0] @staticmethod def _export(volumes, e): if 'actual-pathname' in e: path = e['actual-pathname'] export = e['pathname'] else: path = e['pathname'] export = e['pathname'] vol_name = Ontap._get_volume_from_path(path) fs_id = Ontap._get_volume_id(volumes, vol_name) return NfsExport(md5(vol_name + fs_id), fs_id, export, e['sec-flavor']['sec-flavor-info']['flavor'], Ontap._get_group('root', e), Ontap._get_group('read-write', e), Ontap._get_group('read-only', e), NfsExport.ANON_UID_GID_NA, NfsExport.ANON_UID_GID_NA, None) @handle_ontap_errors def exports(self, search_key=None, search_value=None, flags=0): #Get the file systems once and pass to _export which needs to lookup #the file system id by name. 
v = self.fs() return search_property( [Ontap._export(v, e) for e in self.f.nfs_exports()], search_key, search_value) def _get_volume_from_id(self, fs_id): fs = self.fs() for i in fs: if i.id == fs_id: return i raise RuntimeError("fs id not found in fs:" + fs_id) def _current_export(self, export_path): """ Checks to see if we already have this export. """ cur_exports = self.exports() for ce in cur_exports: if ce.export_path == export_path: return True return False @handle_ontap_errors def export_fs(self, fs_id, export_path, root_list, rw_list, ro_list, anon_uid, anon_gid, auth_type, options, flags=0): """ Creates or modifies the specified export """ # NetApp does not support anon_gid setting. if not (anon_gid == -1 or anon_gid == 0xFFFFFFFFFFFFFFFF): raise LsmError(ErrorNumber.INVALID_ARGUMENT, "ontap plugin does not support " "anon_gid setting") #Get the volume info from the fs_id vol = self._get_volume_from_id(fs_id) # API states that if export path is None the plug-in will select # export path if export_path is None: export_path = '/vol/' + vol.name #If the export already exists we need to update the existing export #not create a new one. 
if self._current_export(export_path): method = self.f.nfs_export_fs_modify2 else: method = self.f.nfs_export_fs2 method('/vol/' + vol.name, export_path, ro_list, rw_list, root_list, anon_uid, auth_type) current_exports = self.exports() for e in current_exports: if e.fs_id == fs_id and e.export_path == export_path: return e raise LsmError(ErrorNumber.PLUGIN_BUG, "export not created successfully!") @handle_ontap_errors def export_remove(self, export, flags=0): self.f.nfs_export_remove([export.export_path]) @handle_ontap_errors def volume_child_dependency(self, volume, flags=0): return False @handle_ontap_errors def volume_child_dependency_rm(self, volume, flags=0): return None @handle_ontap_errors def fs_child_dependency(self, fs, files=None, flags=0): rc = False #TODO: Make sure file actually exists if specified if not files: children = self.f.volume_children(fs.name) if children: rc = True return rc @handle_ontap_errors def fs_child_dependency_rm(self, fs, files=None, flags=0): if files: return None else: children = self.f.volume_children(fs.name) if children: for c in children: self.f.volume_split_clone(c) return "%s@%s" % (Ontap.SPLIT_JOB, ",".join(children)) return None @handle_ontap_errors def target_ports(self, search_key=None, search_value=None, flags=0): tp = [] #Get all FC fcp = self.f.fcp_list() for f in fcp: a = f['addr'] adapter = f['adapter'] tp.append(TargetPort(md5(a), TargetPort.TYPE_FC, a, a, a, adapter, self.sys_info.id)) node_name = self.f.iscsi_node_name() iscsi = self.f.iscsi_list() for i in iscsi: #Get all iSCSI service_address = node_name network_address = "%s:%s" % (i['ip'], i['port']) physical_address = i['mac'] physical_name = i['interface'] tid = md5(service_address + network_address + physical_address + physical_name) tp.append(TargetPort(tid, TargetPort.TYPE_ISCSI, service_address, network_address, physical_address, physical_name, self.sys_info.id)) return search_property(tp, search_key, search_value) @staticmethod def 
_raid_type_of_na_aggr(na_aggr): na_raid_statuses = na_aggr['raid-status'].split(',') if 'mixed_raid_type' in na_raid_statuses: return Volume.RAID_TYPE_MIXED elif 'raid0' in na_raid_statuses: return Volume.RAID_TYPE_RAID0 elif 'raid4' in na_raid_statuses: return Volume.RAID_TYPE_RAID4 elif 'raid_dp' in na_raid_statuses: return Volume.RAID_TYPE_RAID6 return Volume.RAID_TYPE_UNKNOWN @handle_ontap_errors def volume_raid_info(self, volume, flags=0): na_vol_name = Ontap._get_volume_from_path(volume.pool_id) na_vol = self.f.volumes(volume_name=na_vol_name) if len(na_vol) == 0: # If parent pool not found, then this LSM volume should not exist. raise LsmError( ErrorNumber.NOT_FOUND_VOLUME, "Volume not found") if len(na_vol) != 1: raise LsmError( ErrorNumber.PLUGIN_BUG, "volume_raid_info(): Got 2+ na_vols from self.f.volumes() " "%s" % na_vol) na_vol = na_vol[0] na_aggr_name = na_vol['containing-aggregate'] na_aggr = self.f.aggregates(aggr_name=na_aggr_name)[0] raid_type = Ontap._raid_type_of_na_aggr(na_aggr) disk_count = int(na_aggr['disk-count']) return [ raid_type, Ontap._STRIP_SIZE, disk_count, Ontap._STRIP_SIZE, Ontap._OPT_IO_SIZE] @handle_ontap_errors def pool_member_info(self, pool, flags=0): if pool.element_type & Pool.ELEMENT_TYPE_VOLUME: # We got a NetApp volume raid_type = Volume.RAID_TYPE_OTHER member_type = Pool.MEMBER_TYPE_POOL na_vol = self.f.volumes(volume_name=pool.name)[0] disk_ids = [na_vol['containing-aggregate']] else: # We got a NetApp aggregate member_type = Pool.MEMBER_TYPE_DISK na_aggr = self.f.aggregates(aggr_name=pool.name)[0] raid_type = Ontap._raid_type_of_na_aggr(na_aggr) disk_ids = list( Ontap._disk_id(d) for d in self.f.disks() if 'aggregate' in d and d['aggregate'] == pool.name) return raid_type, member_type, disk_ids libstoragemgmt-1.2.3/plugin/ontap/__init__.py0000664000175000017500000000000012537546123016206 00000000000000libstoragemgmt-1.2.3/plugin/Makefile.am0000664000175000017500000000160612537546123015025 00000000000000SUBDIRS=simc 
megaraid hpsa plugindir = $(pythondir)/lsm/plugin plugin_PYTHON= __init__.py simdir = $(plugindir)/sim sim_PYTHON = \ sim/__init__.py \ sim/simulator.py \ sim/simarray.py targetddir = $(plugindir)/targetd targetd_PYTHON = \ targetd/__init__.py \ targetd/targetd.py ontapdir = $(plugindir)/ontap ontap_PYTHON = \ ontap/__init__.py \ ontap/ontap.py \ ontap/na.py smispydir = $(plugindir)/smispy smispy_PYTHON = \ smispy/__init__.py \ smispy/smis.py \ smispy/utils.py \ smispy/smis_common.py \ smispy/dmtf.py \ smispy/smis_cap.py \ smispy/smis_sys.py \ smispy/smis_pool.py \ smispy/smis_disk.py \ smispy/smis_ag.py \ smispy/smis_vol.py nstordir = $(plugindir)/nstor nstor_PYTHON = \ nstor/__init__.py \ nstor/nstor.py dist_bin_SCRIPTS= \ sim/sim_lsmplugin \ smispy/smispy_lsmplugin \ nstor/nstor_lsmplugin \ ontap/ontap_lsmplugin \ targetd/targetd_lsmplugin libstoragemgmt-1.2.3/plugin/megaraid/0000775000175000017500000000000012542455463014621 500000000000000libstoragemgmt-1.2.3/plugin/megaraid/megaraid_lsmplugin0000775000175000017500000000233112537737032020330 00000000000000#!/usr/bin/env python2 # Copyright (C) 2011-2013 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . 
# # Author: tasleson import sys import syslog import traceback try: from lsm import PluginRunner from lsm.plugin.megaraid import MegaRAID if __name__ == '__main__': PluginRunner(MegaRAID, sys.argv).run() except Exception: #This should be quite rare, but when it does happen this is pretty #key in understanding what happened, especially when it happens when #running from the daemon. msg = str(traceback.format_exc()) syslog.syslog(syslog.LOG_ERR, msg) sys.stderr.write(msg) sys.exit(1) libstoragemgmt-1.2.3/plugin/megaraid/Makefile.am0000664000175000017500000000027112537546123016573 00000000000000if WITH_MEGARAID plugindir = $(pythondir)/lsm/plugin megaraiddir = $(plugindir)/megaraid megaraid_PYTHON = __init__.py megaraid.py utils.py dist_bin_SCRIPTS= megaraid_lsmplugin endif libstoragemgmt-1.2.3/plugin/megaraid/__init__.py0000664000175000017500000000006212537546123016646 00000000000000from lsm.plugin.megaraid.megaraid import MegaRAID libstoragemgmt-1.2.3/plugin/megaraid/Makefile.in0000664000175000017500000004411512542455445016613 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = plugin/megaraid DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(am__dist_bin_SCRIPTS_DIST) $(am__megaraid_PYTHON_DIST) \ $(top_srcdir)/build-aux/py-compile ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 
$(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__dist_bin_SCRIPTS_DIST = megaraid_lsmplugin am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(megaraiddir)" SCRIPTS = $(dist_bin_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__megaraid_PYTHON_DIST = __init__.py megaraid.py utils.py am__py_compile = PYTHON=$(PYTHON) $(SHELL) $(py_compile) am__pep3147_tweak = \ sed -e 's|\.py$$||' -e 's|[^/]*$$|__pycache__/&.*.py|' py_compile = $(top_srcdir)/build-aux/py-compile am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = 
@LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ 
build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ @WITH_MEGARAID_TRUE@plugindir = $(pythondir)/lsm/plugin @WITH_MEGARAID_TRUE@megaraiddir = $(plugindir)/megaraid @WITH_MEGARAID_TRUE@megaraid_PYTHON = __init__.py megaraid.py utils.py @WITH_MEGARAID_TRUE@dist_bin_SCRIPTS = megaraid_lsmplugin all: all-am .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu plugin/megaraid/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu plugin/megaraid/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-dist_binSCRIPTS: $(dist_bin_SCRIPTS) @$(NORMAL_INSTALL) @list='$(dist_bin_SCRIPTS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-dist_binSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(dist_bin_SCRIPTS)'; test -n "$(bindir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ 
dir='$(DESTDIR)$(bindir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-megaraidPYTHON: $(megaraid_PYTHON) @$(NORMAL_INSTALL) @list='$(megaraid_PYTHON)'; dlist=; list2=; test -n "$(megaraiddir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(megaraiddir)'"; \ $(MKDIR_P) "$(DESTDIR)$(megaraiddir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f $$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(megaraiddir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(megaraiddir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ $(am__py_compile) --destdir "$(DESTDIR)" \ --basedir "$(megaraiddir)" $$dlist; \ else :; fi uninstall-megaraidPYTHON: @$(NORMAL_UNINSTALL) @list='$(megaraid_PYTHON)'; test -n "$(megaraiddir)" || list=; \ py_files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$py_files" || exit 0; \ dir='$(DESTDIR)$(megaraiddir)'; \ pyc_files=`echo "$$py_files" | sed 's|$$|c|'`; \ pyo_files=`echo "$$py_files" | sed 's|$$|o|'`; \ py_files_pep3147=`echo "$$py_files" | $(am__pep3147_tweak)`; \ echo "$$py_files_pep3147";\ pyc_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|c|'`; \ pyo_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|o|'`; \ st=0; \ for files in \ "$$py_files" \ "$$pyc_files" \ "$$pyo_files" \ "$$pyc_files_pep3147" \ "$$pyo_files_pep3147" \ ; do \ $(am__uninstall_files_from_dir) || st=$$?; \ done; \ exit $$st tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e 
"s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(megaraiddir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . 
= "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-megaraidPYTHON install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-dist_binSCRIPTS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_binSCRIPTS uninstall-megaraidPYTHON .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_binSCRIPTS install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man \ install-megaraidPYTHON install-pdf install-pdf-am install-ps \ install-ps-am install-strip installcheck installcheck-am \ installdirs maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags-am uninstall uninstall-am \ uninstall-dist_binSCRIPTS uninstall-megaraidPYTHON # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: libstoragemgmt-1.2.3/plugin/megaraid/utils.py0000664000175000017500000000324412537737032016255 00000000000000## Copyright (C) 2015 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: Gris Ge import subprocess import os def cmd_exec(cmds): """ Execute provided command and return the STDOUT as string. Raise ExecError if command return code is not zero """ cmd_popen = subprocess.Popen( cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env={"PATH": os.getenv("PATH")}) str_stdout = "".join(list(cmd_popen.stdout)).strip() str_stderr = "".join(list(cmd_popen.stderr)).strip() errno = cmd_popen.wait() if errno != 0: raise ExecError(" ".join(cmds), errno, str_stdout, str_stderr) return str_stdout class ExecError(Exception): def __init__(self, cmd, errno, stdout, stderr, *args, **kwargs): Exception.__init__(self, *args, **kwargs) self.cmd = cmd self.errno = errno self.stdout = stdout self.stderr = stderr def __str__(self): return "cmd: '%s', errno: %d, stdout: '%s', stderr: '%s'" % \ (self.cmd, self.errno, self.stdout, self.stderr) libstoragemgmt-1.2.3/plugin/megaraid/megaraid.py0000664000175000017500000007165712542267433016702 00000000000000# Copyright (C) 2015 Red Hat, Inc. 
# This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: Gris Ge import os import json import re import errno import math from lsm import (uri_parse, search_property, size_human_2_size_bytes, Capabilities, LsmError, ErrorNumber, System, Client, Disk, VERSION, IPlugin, Pool, Volume) from lsm.plugin.megaraid.utils import cmd_exec, ExecError # Naming scheme # mega_sys_path /c0 # mega_disk_path /c0/e64/s0 # lsi_disk_id 0:64:0 def _handle_errors(method): def _wrapper(*args, **kwargs): try: return method(*args, **kwargs) except LsmError: raise except KeyError as key_error: raise LsmError( ErrorNumber.PLUGIN_BUG, "Expected key missing from MegaRAID storcli output:%s" % key_error) except ExecError as exec_error: raise LsmError(ErrorNumber.PLUGIN_BUG, str(exec_error)) except Exception as common_error: raise LsmError( ErrorNumber.PLUGIN_BUG, "Got unexpected error %s" % common_error) return _wrapper def _blk_count_of(mega_disk_size): blk_count_regex = re.compile("(0x[0-9a-f]+) Sectors") blk_count_search = blk_count_regex.search(mega_disk_size) if blk_count_search: return int(blk_count_search.group(1), 16) return Disk.BLOCK_COUNT_NOT_FOUND def _disk_type_of(disk_show_basic_dict): """ Return the 'Drive /c0/e64/s0' entry of '/c0/e64/s0 show all' """ disk_media = disk_show_basic_dict['Med'] disk_interface = disk_show_basic_dict['Intf'] if disk_media == 'HDD': if disk_interface == 'SATA': return Disk.TYPE_SATA elif 
disk_interface == 'SAS': return Disk.TYPE_SAS elif disk_interface == 'Parallel SCSI': return Disk.TYPE_SCSI elif disk_interface == 'FC': return Disk.TYPE_FC else: return Disk.TYPE_HDD elif disk_media == 'SSD': return Disk.TYPE_SSD return Disk.TYPE_UNKNOWN _DISK_STATE_MAP = { 'Onln': Disk.STATUS_OK, 'Offln': Disk.STATUS_ERROR, 'GHS': Disk.STATUS_SPARE_DISK | Disk.STATUS_OK, 'DHS': Disk.STATUS_SPARE_DISK | Disk.STATUS_OK, 'UGood': Disk.STATUS_FREE | Disk.STATUS_OK, 'UBad': Disk.STATUS_FREE | Disk.STATUS_ERROR, 'Rbld': Disk.STATUS_RECONSTRUCT, } def _disk_status_of(disk_show_basic_dict, disk_show_stat_dict): disk_status = _DISK_STATE_MAP.get( disk_show_basic_dict['State'], 0) if disk_show_stat_dict['Media Error Count'] or \ disk_show_stat_dict['Other Error Count'] or \ disk_show_stat_dict['S.M.A.R.T alert flagged by drive'] != 'No': disk_status -= Disk.STATUS_OK disk_status |= Disk.STATUS_ERROR elif disk_show_stat_dict['Predictive Failure Count']: disk_status -= Disk.STATUS_OK disk_status |= Disk.STATUS_PREDICTIVE_FAILURE if disk_show_basic_dict['Sp'] == 'D': disk_status |= Disk.STATUS_STOPPED if disk_show_basic_dict['Sp'] == 'F': disk_status |= Disk.STATUS_OTHER if disk_status == 0: disk_status = Disk.STATUS_UNKNOWN return disk_status def _mega_size_to_lsm(mega_size): """ LSI Using 'TB, GB, MB, KB' and etc, for LSM, they are 'TiB' and etc. 
Return int of block bytes """ re_regex = re.compile("^([0-9.]+) ([EPTGMK])B$") re_match = re_regex.match(mega_size) if re_match: return size_human_2_size_bytes( "%s%siB" % (re_match.group(1), re_match.group(2))) raise LsmError( ErrorNumber.PLUGIN_BUG, "_mega_size_to_lsm(): Got unexpected LSI size string %s" % mega_size) _POOL_STATUS_MAP = { 'Onln': Pool.STATUS_OK, 'Dgrd': Pool.STATUS_DEGRADED | Pool.STATUS_OK, 'Pdgd': Pool.STATUS_DEGRADED | Pool.STATUS_OK, 'Offln': Pool.STATUS_ERROR, 'Rbld': Pool.STATUS_RECONSTRUCTING | Pool.STATUS_DEGRADED | Pool.STATUS_OK, 'Optl': Pool.STATUS_OK, } def _pool_status_of(dg_top): """ Return status """ if dg_top['State'] in _POOL_STATUS_MAP.keys(): return _POOL_STATUS_MAP[dg_top['State']] return Pool.STATUS_UNKNOWN def _pool_id_of(dg_id, sys_id): return "%s:DG%s" % (sys_id, dg_id) _RAID_TYPE_MAP = { 'RAID0': Volume.RAID_TYPE_RAID0, 'RAID1': Volume.RAID_TYPE_RAID1, 'RAID5': Volume.RAID_TYPE_RAID5, 'RAID6': Volume.RAID_TYPE_RAID6, 'RAID00': Volume.RAID_TYPE_RAID0, # Some MegaRAID only support max 16 disks in each span. # To support 16+ disks in on group, MegaRAI has RAID00 or even RAID000. # All of them are considered as RAID0 'RAID10': Volume.RAID_TYPE_RAID10, 'RAID50': Volume.RAID_TYPE_RAID50, 'RAID60': Volume.RAID_TYPE_RAID60, } _LSM_RAID_TYPE_CONV = { Volume.RAID_TYPE_RAID0: 'RAID0', Volume.RAID_TYPE_RAID1: 'RAID1', Volume.RAID_TYPE_RAID5: 'RAID5', Volume.RAID_TYPE_RAID6: 'RAID6', Volume.RAID_TYPE_RAID50: 'RAID50', Volume.RAID_TYPE_RAID60: 'RAID60', Volume.RAID_TYPE_RAID10: 'RAID10', } def _mega_raid_type_to_lsm(vd_basic_info, vd_prop_info): raid_type = _RAID_TYPE_MAP.get( vd_basic_info['TYPE'], Volume.RAID_TYPE_UNKNOWN) # In LSI, four disks or more RAID1 is actually a RAID10. 
if raid_type == Volume.RAID_TYPE_RAID1 and \ int(vd_prop_info['Number of Drives Per Span']) >= 4: raid_type = Volume.RAID_TYPE_RAID10 return raid_type def _lsm_raid_type_to_mega(lsm_raid_type): try: return _LSM_RAID_TYPE_CONV[lsm_raid_type] except KeyError: raise LsmError( ErrorNumber.NO_SUPPORT, "RAID type %d not supported" % lsm_raid_type) class MegaRAID(IPlugin): _DEFAULT_BIN_PATHS = [ "/opt/MegaRAID/storcli/storcli64", "/opt/MegaRAID/storcli/storcli", "/opt/MegaRAID/perccli/perccli64", "/opt/MegaRAID/perccli/perccli"] _CMD_JSON_OUTPUT_SWITCH = 'J' def __init__(self): self._storcli_bin = None def _find_storcli(self): """ Try _DEFAULT_BIN_PATHS """ for cur_path in MegaRAID._DEFAULT_BIN_PATHS: if os.path.lexists(cur_path): self._storcli_bin = cur_path if not self._storcli_bin: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "MegaRAID storcli is not installed correctly") @_handle_errors def plugin_register(self, uri, password, timeout, flags=Client.FLAG_RSVD): if os.geteuid() != 0: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "This plugin requires root privilege both daemon and client") uri_parsed = uri_parse(uri) self._storcli_bin = uri_parsed.get('parameters', {}).get('storcli') if not self._storcli_bin: self._find_storcli() # change working dir to "/tmp" as storcli will create a log file # named as 'MegaSAS.log'. 
os.chdir("/tmp") self._storcli_exec(['-v'], flag_json=False) @_handle_errors def plugin_unregister(self, flags=Client.FLAG_RSVD): pass @_handle_errors def job_status(self, job_id, flags=Client.FLAG_RSVD): raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported yet") @_handle_errors def job_free(self, job_id, flags=Client.FLAG_RSVD): pass @_handle_errors def plugin_info(self, flags=Client.FLAG_RSVD): return "LSI MegaRAID Plugin", VERSION @_handle_errors def time_out_set(self, ms, flags=Client.FLAG_RSVD): raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported yet") @_handle_errors def time_out_get(self, flags=Client.FLAG_RSVD): raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported yet") @_handle_errors def capabilities(self, system, flags=Client.FLAG_RSVD): cur_lsm_syss = self.systems() if system.id not in list(s.id for s in cur_lsm_syss): raise LsmError( ErrorNumber.NOT_FOUND_SYSTEM, "System not found") cap = Capabilities() cap.set(Capabilities.DISKS) cap.set(Capabilities.VOLUMES) cap.set(Capabilities.VOLUME_RAID_INFO) cap.set(Capabilities.POOL_MEMBER_INFO) cap.set(Capabilities.VOLUME_RAID_CREATE) return cap def _storcli_exec(self, storcli_cmds, flag_json=True): storcli_cmds.insert(0, self._storcli_bin) if flag_json: storcli_cmds.append(MegaRAID._CMD_JSON_OUTPUT_SWITCH) try: output = cmd_exec(storcli_cmds) except OSError as os_error: if os_error.errno == errno.ENOENT: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "storcli binary '%s' is not exist or executable." 
% self._storcli_bin) else: raise output = re.sub("[^\x20-\x7e]", " ", output) if flag_json: output_dict = json.loads(output) ctrl_output = output_dict.get('Controllers') if len(ctrl_output) != 1: raise LsmError( ErrorNumber.PLUGIN_BUG, "_storcli_exec(): Unexpected output from MegaRAID " "storcli: %s" % output_dict) rc_status = ctrl_output[0].get('Command Status') if rc_status.get('Status') != 'Success': raise LsmError( ErrorNumber.PLUGIN_BUG, "MegaRAID storcli failed with error %d: %s" % (rc_status['Status Code'], rc_status['Description'])) real_data = ctrl_output[0].get('Response Data') if real_data and 'Response Data' in real_data.keys(): return real_data['Response Data'] return real_data else: return output def _ctrl_count(self): ctrl_count = self._storcli_exec( ["show", "ctrlcount"]).get("Controller Count") if ctrl_count < 1: raise LsmError( ErrorNumber.NOT_FOUND_SYSTEM, "No MegaRAID controller detected by %s" % self._storcli_bin) return ctrl_count def _lsm_status_of_ctrl(self, ctrl_show_all_output): lsi_status_info = ctrl_show_all_output['Status'] status_info = '' status = System.STATUS_UNKNOWN if lsi_status_info['Controller Status'] == 'Optimal': status = System.STATUS_OK else: # TODO(Gris Ge): Try pull a disk off to check whether this change. 
status_info = "%s: " % lsi_status_info['Controller Status'] for key_name in lsi_status_info.keys(): if key_name == 'Controller Status': continue if lsi_status_info[key_name] != 0 and \ lsi_status_info[key_name] != 'No' and \ lsi_status_info[key_name] != 'NA': status_info += " %s:%s" % ( key_name, lsi_status_info[key_name]) return status, status_info def _sys_id_of_ctrl_num(self, ctrl_num, ctrl_show_all_output=None): if ctrl_show_all_output is None: return self._storcli_exec( ["/c%d" % ctrl_num, "show"])['Serial Number'] else: return ctrl_show_all_output['Basics']['Serial Number'] @_handle_errors def systems(self, flags=Client.FLAG_RSVD): rc_lsm_syss = [] for ctrl_num in range(self._ctrl_count()): ctrl_show_all_output = self._storcli_exec( ["/c%d" % ctrl_num, "show", "all"]) sys_id = self._sys_id_of_ctrl_num(ctrl_num, ctrl_show_all_output) sys_name = "%s %s %s ver: %s" % ( ctrl_show_all_output['Basics']['Model'], ctrl_show_all_output['Bus']['Host Interface'], ctrl_show_all_output['Basics']['PCI Address'], ctrl_show_all_output['Version']['Firmware Package Build'], ) (status, status_info) = self._lsm_status_of_ctrl( ctrl_show_all_output) plugin_data = "/c%d" % ctrl_num # Since PCI slot sequence might change. # This string just stored for quick system verification. rc_lsm_syss.append( System(sys_id, sys_name, status, status_info, plugin_data)) return rc_lsm_syss @_handle_errors def disks(self, search_key=None, search_value=None, flags=Client.FLAG_RSVD): rc_lsm_disks = [] mega_disk_path_regex = re.compile( r"^Drive (\/c[0-9]+\/e[0-9]+\/s[0-9]+) - Detailed Information$") for ctrl_num in range(self._ctrl_count()): sys_id = self._sys_id_of_ctrl_num(ctrl_num) disk_show_output = self._storcli_exec( ["/c%d/eall/sall" % ctrl_num, "show", "all"]) for drive_name in disk_show_output.keys(): re_match = mega_disk_path_regex.match(drive_name) if not re_match: continue mega_disk_path = re_match.group(1) # Assuming only 1 disk attached to each slot. 
disk_show_basic_dict = disk_show_output[ "Drive %s" % mega_disk_path][0] disk_show_attr_dict = disk_show_output[drive_name][ 'Drive %s Device attributes' % mega_disk_path] disk_show_stat_dict = disk_show_output[drive_name][ 'Drive %s State' % mega_disk_path] disk_id = disk_show_attr_dict['SN'].strip() disk_name = "Disk %s %s %s" % ( disk_show_basic_dict['DID'], disk_show_attr_dict['Manufacturer Id'].strip(), disk_show_attr_dict['Model Number']) disk_type = _disk_type_of(disk_show_basic_dict) blk_size = size_human_2_size_bytes( disk_show_basic_dict['SeSz']) blk_count = _blk_count_of(disk_show_attr_dict['Coerced size']) status = _disk_status_of( disk_show_basic_dict, disk_show_stat_dict) plugin_data = "%s:%s" % ( ctrl_num, disk_show_basic_dict['EID:Slt']) rc_lsm_disks.append( Disk( disk_id, disk_name, disk_type, blk_size, blk_count, status, sys_id, plugin_data)) return search_property(rc_lsm_disks, search_key, search_value) @staticmethod def _dg_free_size(dg_num, free_space_list): """ Get information from 'FREE SPACE DETAILS' of /c0/dall show all. """ for free_space in free_space_list: if int(free_space['DG']) == int(dg_num): return _mega_size_to_lsm(free_space['Size']) return 0 def _dg_top_to_lsm_pool(self, dg_top, free_space_list, ctrl_num): sys_id = self._sys_id_of_ctrl_num(ctrl_num) pool_id = _pool_id_of(dg_top['DG'], sys_id) name = '%s Disk Group %s' % (dg_top['Type'], dg_top['DG']) elem_type = Pool.ELEMENT_TYPE_VOLUME | Pool.ELEMENT_TYPE_VOLUME_FULL unsupported_actions = 0 # TODO(Gris Ge): contact LSI to get accurate total space and free # space. The size we are using here is not what host # got. 
total_space = _mega_size_to_lsm(dg_top['Size']) free_space = MegaRAID._dg_free_size(dg_top['DG'], free_space_list) status = _pool_status_of(dg_top) status_info = '' if status == Pool.STATUS_UNKNOWN: status_info = dg_top['State'] plugin_data = "/c%d/d%s" % (ctrl_num, dg_top['DG']) return Pool( pool_id, name, elem_type, unsupported_actions, total_space, free_space, status, status_info, sys_id, plugin_data) @_handle_errors def pools(self, search_key=None, search_value=None, flags=Client.FLAG_RSVD): lsm_pools = [] for ctrl_num in range(self._ctrl_count()): dg_show_output = self._storcli_exec( ["/c%d/dall" % ctrl_num, "show", "all"]) free_space_list = dg_show_output.get('FREE SPACE DETAILS', []) for dg_top in dg_show_output['TOPOLOGY']: if dg_top['Arr'] != '-': continue if dg_top['DG'] == '-': continue lsm_pools.append( self._dg_top_to_lsm_pool( dg_top, free_space_list, ctrl_num)) return search_property(lsm_pools, search_key, search_value) @staticmethod def _vd_to_lsm_vol(vd_id, dg_id, sys_id, vd_basic_info, vd_pd_info_list, vd_prop_info, vd_path): vol_id = "%s:VD%d" % (sys_id, vd_id) name = "VD %d" % vd_id vpd83 = '' # TODO(Gris Ge): Beg LSI to provide this information. 
block_size = size_human_2_size_bytes(vd_pd_info_list[0]['SeSz']) num_of_blocks = vd_prop_info['Number of Blocks'] admin_state = Volume.ADMIN_STATE_ENABLED if vd_prop_info['Exposed to OS'] != 'Yes' or \ vd_basic_info['Access'] != 'RW': admin_state = Volume.ADMIN_STATE_DISABLED pool_id = _pool_id_of(dg_id, sys_id) plugin_data = vd_path return Volume( vol_id, name, vpd83, block_size, num_of_blocks, admin_state, sys_id, pool_id, plugin_data) @_handle_errors def volumes(self, search_key=None, search_value=None, flags=Client.FLAG_RSVD): lsm_vols = [] for ctrl_num in range(self._ctrl_count()): vol_show_output = self._storcli_exec( ["/c%d/vall" % ctrl_num, "show", "all"]) sys_id = self._sys_id_of_ctrl_num(ctrl_num) if vol_show_output is None or len(vol_show_output) == 0: continue for key_name in vol_show_output.keys(): if key_name.startswith('/c'): vd_basic_info = vol_show_output[key_name][0] (dg_id, vd_id) = vd_basic_info['DG/VD'].split('/') dg_id = int(dg_id) vd_id = int(vd_id) vd_pd_info_list = vol_show_output['PDs for VD %d' % vd_id] vd_prop_info = vol_show_output['VD%d Properties' % vd_id] lsm_vols.append( MegaRAID._vd_to_lsm_vol( vd_id, dg_id, sys_id, vd_basic_info, vd_pd_info_list, vd_prop_info, key_name)) return search_property(lsm_vols, search_key, search_value) @_handle_errors def volume_raid_info(self, volume, flags=Client.FLAG_RSVD): if not volume.plugin_data: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Ilegal input volume argument: missing plugin_data property") vd_path = volume.plugin_data vol_show_output = self._storcli_exec([vd_path, "show", "all"]) vd_basic_info = vol_show_output[vd_path][0] vd_id = int(vd_basic_info['DG/VD'].split('/')[-1]) vd_prop_info = vol_show_output['VD%d Properties' % vd_id] raid_type = _mega_raid_type_to_lsm(vd_basic_info, vd_prop_info) strip_size = _mega_size_to_lsm(vd_prop_info['Strip Size']) disk_count = ( int(vd_prop_info['Number of Drives Per Span']) * int(vd_prop_info['Span Depth'])) if raid_type == Volume.RAID_TYPE_RAID0: 
strip_count = disk_count elif raid_type == Volume.RAID_TYPE_RAID1: strip_count = 1 elif raid_type == Volume.RAID_TYPE_RAID5: strip_count = disk_count - 1 elif raid_type == Volume.RAID_TYPE_RAID6: strip_count = disk_count - 2 elif raid_type == Volume.RAID_TYPE_RAID50: strip_count = ( (int(vd_prop_info['Number of Drives Per Span']) - 1) * int(vd_prop_info['Span Depth'])) elif raid_type == Volume.RAID_TYPE_RAID60: strip_count = ( (int(vd_prop_info['Number of Drives Per Span']) - 2) * int(vd_prop_info['Span Depth'])) elif raid_type == Volume.RAID_TYPE_RAID10: strip_count = ( int(vd_prop_info['Number of Drives Per Span']) / 2 * int(vd_prop_info['Span Depth'])) else: # MegaRAID does not support 15 or 16 yet. raise LsmError( ErrorNumber.PLUGIN_BUG, "volume_raid_info(): Got unexpected RAID type: %s" % vd_basic_info['TYPE']) return [ raid_type, strip_size, disk_count, strip_size, strip_size * strip_count] @_handle_errors def pool_member_info(self, pool, flags=Client.FLAG_RSVD): lsi_dg_path = pool.plugin_data # Check whether pool exists. 
try: dg_show_all_output = self._storcli_exec( [lsi_dg_path, "show", "all"]) except ExecError as exec_error: try: json_output = json.loads(exec_error.stdout) detail_error = json_output[ 'Controllers'][0]['Command Status']['Detailed Status'] except Exception: raise exec_error if detail_error and detail_error[0]['Status'] == 'Not found': raise LsmError( ErrorNumber.NOT_FOUND_POOL, "Pool not found") raise ctrl_num = lsi_dg_path.split('/')[1][1:] lsm_disk_map = {} disk_ids = [] for lsm_disk in self.disks(): lsm_disk_map[lsm_disk.plugin_data] = lsm_disk.id for dg_disk_info in dg_show_all_output['DG Drive LIST']: cur_lsi_disk_id = "%s:%s" % (ctrl_num, dg_disk_info['EID:Slt']) if cur_lsi_disk_id in lsm_disk_map.keys(): disk_ids.append(lsm_disk_map[cur_lsi_disk_id]) else: raise LsmError( ErrorNumber.PLUGIN_BUG, "pool_member_info(): Failed to find disk id of %s" % cur_lsi_disk_id) raid_type = Volume.RAID_TYPE_UNKNOWN dg_num = lsi_dg_path.split('/')[2][1:] for dg_top in dg_show_all_output['TOPOLOGY']: if dg_top['Arr'] == '-' and \ dg_top['Row'] == '-' and \ int(dg_top['DG']) == int(dg_num): raid_type = _RAID_TYPE_MAP.get( dg_top['Type'], Volume.RAID_TYPE_UNKNOWN) break if raid_type == Volume.RAID_TYPE_RAID1 and len(disk_ids) >= 4: raid_type = Volume.RAID_TYPE_RAID10 return raid_type, Pool.MEMBER_TYPE_DISK, disk_ids def _vcr_cap_get(self, mega_sys_path): cap_output = self._storcli_exec( [mega_sys_path, "show", "all"])['Capabilities'] mega_raid_types = \ cap_output['RAID Level Supported'].replace(', \n', '').split(', ') supported_raid_types = [] for cur_mega_raid_type in _RAID_TYPE_MAP.keys(): if cur_mega_raid_type in mega_raid_types: supported_raid_types.append( _RAID_TYPE_MAP[cur_mega_raid_type]) supported_raid_types = sorted(list(set(supported_raid_types))) min_strip_size = _mega_size_to_lsm(cap_output['Min Strip Size']) max_strip_size = _mega_size_to_lsm(cap_output['Max Strip Size']) supported_strip_sizes = list( min_strip_size * (2 ** i) for i in range( 0, 
int(math.log(max_strip_size / min_strip_size, 2) + 1))) # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # The math above is to generate a list like: # min_strip_size, ... n^2 , max_strip_size return supported_raid_types, supported_strip_sizes @_handle_errors def volume_raid_create_cap_get(self, system, flags=Client.FLAG_RSVD): """ Depend on the 'Capabilities' section of "storcli /c0 show all" output. """ cur_lsm_syss = list(s for s in self.systems() if s.id == system.id) if len(cur_lsm_syss) != 1: raise LsmError( ErrorNumber.NOT_FOUND_SYSTEM, "System not found") lsm_sys = cur_lsm_syss[0] return self._vcr_cap_get(lsm_sys.plugin_data) @_handle_errors def volume_raid_create(self, name, raid_type, disks, strip_size, flags=Client.FLAG_RSVD): """ Work flow: 1. Create RAID volume storcli /c0 add vd RAID10 drives=252:1-4 pdperarray=2 J 2. Find out pool/DG base on one disk. storcli /c0/e252/s1 show J 3. Find out the volume/VD base on pool/DG using self.volumes() """ mega_raid_type = _lsm_raid_type_to_mega(raid_type) ctrl_num = None slot_nums = [] enclosure_num = None for disk in disks: if not disk.plugin_data: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Illegal input disks argument: missing plugin_data " "property") # Disk should from the same controller. (cur_ctrl_num, cur_enclosure_num, slot_num) = \ disk.plugin_data.split(':') if ctrl_num and cur_ctrl_num != ctrl_num: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Illegal input disks argument: disks are not from the " "same controller/system.") if enclosure_num and cur_enclosure_num != enclosure_num: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Illegal input disks argument: disks are not from the " "same disk enclosure.") ctrl_num = int(cur_ctrl_num) enclosure_num = cur_enclosure_num slot_nums.append(slot_num) # Handle request volume name, LSI only allow 15 characters. 
name = re.sub('[^0-9a-zA-Z_\-]+', '', name)[:15] cmds = [ "/c%s" % ctrl_num, "add", "vd", mega_raid_type, 'size=all', "name=%s" % name, "drives=%s:%s" % (enclosure_num, ','.join(slot_nums))] if raid_type == Volume.RAID_TYPE_RAID10 or \ raid_type == Volume.RAID_TYPE_RAID50 or \ raid_type == Volume.RAID_TYPE_RAID60: cmds.append("pdperarray=%d" % int(len(disks) / 2)) if strip_size != Volume.VCR_STRIP_SIZE_DEFAULT: cmds.append("strip=%d" % int(strip_size / 1024)) try: self._storcli_exec(cmds) except ExecError: req_disk_ids = [d.id for d in disks] for cur_disk in self.disks(): if cur_disk.id in req_disk_ids and \ not cur_disk.status & Disk.STATUS_FREE: raise LsmError( ErrorNumber.DISK_NOT_FREE, "Disk %s is not in STATUS_FREE state" % cur_disk.id) # Check whether got unsupported RAID type or stripe size supported_raid_types, supported_strip_sizes = \ self._vcr_cap_get("/c%s" % ctrl_num) if raid_type not in supported_raid_types: raise LsmError( ErrorNumber.NO_SUPPORT, "Provided 'raid_type' is not supported") if strip_size != Volume.VCR_STRIP_SIZE_DEFAULT and \ strip_size not in supported_strip_sizes: raise LsmError( ErrorNumber.NO_SUPPORT, "Provided 'strip_size' is not supported") raise # Find out the DG ID from one disk. 
dg_show_output = self._storcli_exec( ["/c%s/e%s/s%s" % tuple(disks[0].plugin_data.split(":")), "show"]) dg_id = dg_show_output['Drive Information'][0]['DG'] if dg_id == '-': raise LsmError( ErrorNumber.PLUGIN_BUG, "volume_raid_create(): No error found in output, " "but RAID is not created: %s" % dg_show_output.items()) else: dg_id = int(dg_id) pool_id = _pool_id_of(dg_id, self._sys_id_of_ctrl_num(ctrl_num)) lsm_vols = self.volumes(search_key='pool_id', search_value=pool_id) if len(lsm_vols) != 1: raise LsmError( ErrorNumber.PLUGIN_BUG, "volume_raid_create(): Got unexpected volume count(not 1) " "when creating RAID volume") return lsm_vols[0] libstoragemgmt-1.2.3/plugin/__init__.py0000664000175000017500000000000012537546123015065 00000000000000libstoragemgmt-1.2.3/plugin/Makefile.in0000664000175000017500000010315012542455445015035 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = plugin DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(dist_bin_SCRIPTS) $(nstor_PYTHON) $(ontap_PYTHON) \ $(plugin_PYTHON) $(sim_PYTHON) $(smispy_PYTHON) \ $(targetd_PYTHON) $(top_srcdir)/build-aux/py-compile ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = 
$(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(nstordir)" \ "$(DESTDIR)$(ontapdir)" "$(DESTDIR)$(plugindir)" \ "$(DESTDIR)$(simdir)" "$(DESTDIR)$(smispydir)" \ "$(DESTDIR)$(targetddir)" SCRIPTS = $(dist_bin_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__py_compile = PYTHON=$(PYTHON) $(SHELL) $(py_compile) am__pep3147_tweak = \ sed -e 's|\.py$$||' -e 's|[^/]*$$|__pycache__/&.*.py|' py_compile = $(top_srcdir)/build-aux/py-compile RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. 
am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ 
INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ 
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ SUBDIRS = simc megaraid hpsa plugindir = $(pythondir)/lsm/plugin plugin_PYTHON = __init__.py simdir = $(plugindir)/sim sim_PYTHON = \ sim/__init__.py \ sim/simulator.py \ sim/simarray.py targetddir = $(plugindir)/targetd targetd_PYTHON = \ targetd/__init__.py \ targetd/targetd.py ontapdir = $(plugindir)/ontap ontap_PYTHON = \ ontap/__init__.py \ ontap/ontap.py \ ontap/na.py smispydir = $(plugindir)/smispy smispy_PYTHON = \ smispy/__init__.py \ smispy/smis.py \ smispy/utils.py \ smispy/smis_common.py \ smispy/dmtf.py \ smispy/smis_cap.py \ smispy/smis_sys.py \ smispy/smis_pool.py \ smispy/smis_disk.py \ smispy/smis_ag.py \ smispy/smis_vol.py nstordir = 
$(plugindir)/nstor nstor_PYTHON = \ nstor/__init__.py \ nstor/nstor.py dist_bin_SCRIPTS = \ sim/sim_lsmplugin \ smispy/smispy_lsmplugin \ nstor/nstor_lsmplugin \ ontap/ontap_lsmplugin \ targetd/targetd_lsmplugin all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu plugin/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu plugin/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-dist_binSCRIPTS: $(dist_bin_SCRIPTS) @$(NORMAL_INSTALL) @list='$(dist_bin_SCRIPTS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; 
} \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-dist_binSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(dist_bin_SCRIPTS)'; test -n "$(bindir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ dir='$(DESTDIR)$(bindir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-nstorPYTHON: $(nstor_PYTHON) @$(NORMAL_INSTALL) @list='$(nstor_PYTHON)'; dlist=; list2=; test -n "$(nstordir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(nstordir)'"; \ $(MKDIR_P) "$(DESTDIR)$(nstordir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f $$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(nstordir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(nstordir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ $(am__py_compile) --destdir "$(DESTDIR)" \ --basedir "$(nstordir)" $$dlist; \ else :; fi uninstall-nstorPYTHON: @$(NORMAL_UNINSTALL) @list='$(nstor_PYTHON)'; test -n "$(nstordir)" || list=; \ py_files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$py_files" || exit 0; \ dir='$(DESTDIR)$(nstordir)'; \ pyc_files=`echo "$$py_files" | sed 's|$$|c|'`; \ pyo_files=`echo "$$py_files" | sed 
's|$$|o|'`; \ py_files_pep3147=`echo "$$py_files" | $(am__pep3147_tweak)`; \ echo "$$py_files_pep3147";\ pyc_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|c|'`; \ pyo_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|o|'`; \ st=0; \ for files in \ "$$py_files" \ "$$pyc_files" \ "$$pyo_files" \ "$$pyc_files_pep3147" \ "$$pyo_files_pep3147" \ ; do \ $(am__uninstall_files_from_dir) || st=$$?; \ done; \ exit $$st install-ontapPYTHON: $(ontap_PYTHON) @$(NORMAL_INSTALL) @list='$(ontap_PYTHON)'; dlist=; list2=; test -n "$(ontapdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(ontapdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(ontapdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f $$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(ontapdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(ontapdir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ $(am__py_compile) --destdir "$(DESTDIR)" \ --basedir "$(ontapdir)" $$dlist; \ else :; fi uninstall-ontapPYTHON: @$(NORMAL_UNINSTALL) @list='$(ontap_PYTHON)'; test -n "$(ontapdir)" || list=; \ py_files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$py_files" || exit 0; \ dir='$(DESTDIR)$(ontapdir)'; \ pyc_files=`echo "$$py_files" | sed 's|$$|c|'`; \ pyo_files=`echo "$$py_files" | sed 's|$$|o|'`; \ py_files_pep3147=`echo "$$py_files" | $(am__pep3147_tweak)`; \ echo "$$py_files_pep3147";\ pyc_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|c|'`; \ pyo_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|o|'`; \ st=0; \ for files in \ "$$py_files" \ "$$pyc_files" \ "$$pyo_files" \ "$$pyc_files_pep3147" \ "$$pyo_files_pep3147" \ ; do \ $(am__uninstall_files_from_dir) || st=$$?; \ done; \ exit $$st install-pluginPYTHON: 
$(plugin_PYTHON) @$(NORMAL_INSTALL) @list='$(plugin_PYTHON)'; dlist=; list2=; test -n "$(plugindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(plugindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(plugindir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f $$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(plugindir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(plugindir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ $(am__py_compile) --destdir "$(DESTDIR)" \ --basedir "$(plugindir)" $$dlist; \ else :; fi uninstall-pluginPYTHON: @$(NORMAL_UNINSTALL) @list='$(plugin_PYTHON)'; test -n "$(plugindir)" || list=; \ py_files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$py_files" || exit 0; \ dir='$(DESTDIR)$(plugindir)'; \ pyc_files=`echo "$$py_files" | sed 's|$$|c|'`; \ pyo_files=`echo "$$py_files" | sed 's|$$|o|'`; \ py_files_pep3147=`echo "$$py_files" | $(am__pep3147_tweak)`; \ echo "$$py_files_pep3147";\ pyc_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|c|'`; \ pyo_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|o|'`; \ st=0; \ for files in \ "$$py_files" \ "$$pyc_files" \ "$$pyo_files" \ "$$pyc_files_pep3147" \ "$$pyo_files_pep3147" \ ; do \ $(am__uninstall_files_from_dir) || st=$$?; \ done; \ exit $$st install-simPYTHON: $(sim_PYTHON) @$(NORMAL_INSTALL) @list='$(sim_PYTHON)'; dlist=; list2=; test -n "$(simdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(simdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(simdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f $$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do 
echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(simdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(simdir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ $(am__py_compile) --destdir "$(DESTDIR)" \ --basedir "$(simdir)" $$dlist; \ else :; fi uninstall-simPYTHON: @$(NORMAL_UNINSTALL) @list='$(sim_PYTHON)'; test -n "$(simdir)" || list=; \ py_files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$py_files" || exit 0; \ dir='$(DESTDIR)$(simdir)'; \ pyc_files=`echo "$$py_files" | sed 's|$$|c|'`; \ pyo_files=`echo "$$py_files" | sed 's|$$|o|'`; \ py_files_pep3147=`echo "$$py_files" | $(am__pep3147_tweak)`; \ echo "$$py_files_pep3147";\ pyc_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|c|'`; \ pyo_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|o|'`; \ st=0; \ for files in \ "$$py_files" \ "$$pyc_files" \ "$$pyo_files" \ "$$pyc_files_pep3147" \ "$$pyo_files_pep3147" \ ; do \ $(am__uninstall_files_from_dir) || st=$$?; \ done; \ exit $$st install-smispyPYTHON: $(smispy_PYTHON) @$(NORMAL_INSTALL) @list='$(smispy_PYTHON)'; dlist=; list2=; test -n "$(smispydir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(smispydir)'"; \ $(MKDIR_P) "$(DESTDIR)$(smispydir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f $$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(smispydir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(smispydir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ $(am__py_compile) --destdir "$(DESTDIR)" \ --basedir "$(smispydir)" $$dlist; \ else :; fi uninstall-smispyPYTHON: @$(NORMAL_UNINSTALL) @list='$(smispy_PYTHON)'; test -n "$(smispydir)" || list=; \ py_files=`for p in $$list; do 
echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$py_files" || exit 0; \ dir='$(DESTDIR)$(smispydir)'; \ pyc_files=`echo "$$py_files" | sed 's|$$|c|'`; \ pyo_files=`echo "$$py_files" | sed 's|$$|o|'`; \ py_files_pep3147=`echo "$$py_files" | $(am__pep3147_tweak)`; \ echo "$$py_files_pep3147";\ pyc_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|c|'`; \ pyo_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|o|'`; \ st=0; \ for files in \ "$$py_files" \ "$$pyc_files" \ "$$pyo_files" \ "$$pyc_files_pep3147" \ "$$pyo_files_pep3147" \ ; do \ $(am__uninstall_files_from_dir) || st=$$?; \ done; \ exit $$st install-targetdPYTHON: $(targetd_PYTHON) @$(NORMAL_INSTALL) @list='$(targetd_PYTHON)'; dlist=; list2=; test -n "$(targetddir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(targetddir)'"; \ $(MKDIR_P) "$(DESTDIR)$(targetddir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f $$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(targetddir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(targetddir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ $(am__py_compile) --destdir "$(DESTDIR)" \ --basedir "$(targetddir)" $$dlist; \ else :; fi uninstall-targetdPYTHON: @$(NORMAL_UNINSTALL) @list='$(targetd_PYTHON)'; test -n "$(targetddir)" || list=; \ py_files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$py_files" || exit 0; \ dir='$(DESTDIR)$(targetddir)'; \ pyc_files=`echo "$$py_files" | sed 's|$$|c|'`; \ pyo_files=`echo "$$py_files" | sed 's|$$|o|'`; \ py_files_pep3147=`echo "$$py_files" | $(am__pep3147_tweak)`; \ echo "$$py_files_pep3147";\ pyc_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|c|'`; \ pyo_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|o|'`; \ st=0; 
\ for files in \ "$$py_files" \ "$$pyc_files" \ "$$pyo_files" \ "$$pyc_files_pep3147" \ "$$pyo_files_pep3147" \ ; do \ $(am__uninstall_files_from_dir) || st=$$?; \ done; \ exit $$st # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. $(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile $(SCRIPTS) installdirs: installdirs-recursive installdirs-am: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(nstordir)" "$(DESTDIR)$(ontapdir)" "$(DESTDIR)$(plugindir)" "$(DESTDIR)$(simdir)" "$(DESTDIR)$(smispydir)" "$(DESTDIR)$(targetddir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) 
$(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-nstorPYTHON install-ontapPYTHON \ install-pluginPYTHON install-simPYTHON install-smispyPYTHON \ install-targetdPYTHON install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-dist_binSCRIPTS install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: uninstall-dist_binSCRIPTS uninstall-nstorPYTHON \ uninstall-ontapPYTHON uninstall-pluginPYTHON \ uninstall-simPYTHON uninstall-smispyPYTHON \ uninstall-targetdPYTHON .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags 
distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_binSCRIPTS install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-man install-nstorPYTHON \ install-ontapPYTHON install-pdf install-pdf-am \ install-pluginPYTHON install-ps install-ps-am \ install-simPYTHON install-smispyPYTHON install-strip \ install-targetdPYTHON installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am \ uninstall-dist_binSCRIPTS uninstall-nstorPYTHON \ uninstall-ontapPYTHON uninstall-pluginPYTHON \ uninstall-simPYTHON uninstall-smispyPYTHON \ uninstall-targetdPYTHON # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/plugin/hpsa/0000775000175000017500000000000012542455463014003 500000000000000libstoragemgmt-1.2.3/plugin/hpsa/hpsa_lsmplugin0000775000175000017500000000236612537737032016704 00000000000000#!/usr/bin/env python2 # Copyright (C) 2015 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . 
# # Author: tasleson # Gris Ge import sys import syslog import traceback try: from lsm import PluginRunner from lsm.plugin.hpsa import SmartArray if __name__ == '__main__': PluginRunner(SmartArray, sys.argv).run() except Exception: #This should be quite rare, but when it does happen this is pretty #key in understanding what happened, especially when it happens when #running from the daemon. msg = str(traceback.format_exc()) syslog.syslog(syslog.LOG_ERR, msg) sys.stderr.write(msg) sys.exit(1) libstoragemgmt-1.2.3/plugin/hpsa/Makefile.am0000664000175000017500000000024112537546123015752 00000000000000if WITH_HPSA plugindir = $(pythondir)/lsm/plugin hpsadir = $(plugindir)/hpsa hpsa_PYTHON = __init__.py hpsa.py utils.py dist_bin_SCRIPTS= hpsa_lsmplugin endif libstoragemgmt-1.2.3/plugin/hpsa/__init__.py0000664000175000017500000000005412537546123016031 00000000000000from lsm.plugin.hpsa.hpsa import SmartArray libstoragemgmt-1.2.3/plugin/hpsa/Makefile.in0000664000175000017500000004370112542455445015775 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = plugin/hpsa DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(am__dist_bin_SCRIPTS_DIST) $(am__hpsa_PYTHON_DIST) \ $(top_srcdir)/build-aux/py-compile ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 
$(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__dist_bin_SCRIPTS_DIST = hpsa_lsmplugin am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(hpsadir)" SCRIPTS = $(dist_bin_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__hpsa_PYTHON_DIST = __init__.py hpsa.py utils.py am__py_compile = PYTHON=$(PYTHON) $(SHELL) $(py_compile) am__pep3147_tweak = \ sed -e 's|\.py$$||' -e 's|[^/]*$$|__pycache__/&.*.py|' py_compile = $(top_srcdir)/build-aux/py-compile am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ 
LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = 
@build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ @WITH_HPSA_TRUE@plugindir = $(pythondir)/lsm/plugin @WITH_HPSA_TRUE@hpsadir = $(plugindir)/hpsa @WITH_HPSA_TRUE@hpsa_PYTHON = __init__.py hpsa.py utils.py @WITH_HPSA_TRUE@dist_bin_SCRIPTS = hpsa_lsmplugin all: all-am .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu plugin/hpsa/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu plugin/hpsa/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-dist_binSCRIPTS: $(dist_bin_SCRIPTS) @$(NORMAL_INSTALL) @list='$(dist_bin_SCRIPTS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-dist_binSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(dist_bin_SCRIPTS)'; test -n "$(bindir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ 
dir='$(DESTDIR)$(bindir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-hpsaPYTHON: $(hpsa_PYTHON) @$(NORMAL_INSTALL) @list='$(hpsa_PYTHON)'; dlist=; list2=; test -n "$(hpsadir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(hpsadir)'"; \ $(MKDIR_P) "$(DESTDIR)$(hpsadir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f $$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(hpsadir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(hpsadir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ $(am__py_compile) --destdir "$(DESTDIR)" \ --basedir "$(hpsadir)" $$dlist; \ else :; fi uninstall-hpsaPYTHON: @$(NORMAL_UNINSTALL) @list='$(hpsa_PYTHON)'; test -n "$(hpsadir)" || list=; \ py_files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$py_files" || exit 0; \ dir='$(DESTDIR)$(hpsadir)'; \ pyc_files=`echo "$$py_files" | sed 's|$$|c|'`; \ pyo_files=`echo "$$py_files" | sed 's|$$|o|'`; \ py_files_pep3147=`echo "$$py_files" | $(am__pep3147_tweak)`; \ echo "$$py_files_pep3147";\ pyc_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|c|'`; \ pyo_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|o|'`; \ st=0; \ for files in \ "$$py_files" \ "$$pyc_files" \ "$$pyo_files" \ "$$pyc_files_pep3147" \ "$$pyo_files_pep3147" \ ; do \ $(am__uninstall_files_from_dir) || st=$$?; \ done; \ exit $$st tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e 
"s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(hpsadir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-hpsaPYTHON install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-dist_binSCRIPTS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_binSCRIPTS uninstall-hpsaPYTHON .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_binSCRIPTS install-dvi install-dvi-am \ install-exec install-exec-am install-hpsaPYTHON install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am uninstall-dist_binSCRIPTS \ uninstall-hpsaPYTHON # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/plugin/hpsa/utils.py0000664000175000017500000000353112537737032015436 00000000000000## Copyright (C) 2015 Red Hat, Inc. 
# This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: Gris Ge import subprocess import os def cmd_exec(cmds): """ Execute provided command and return the STDOUT as string. Raise ExecError if command return code is not zero """ cmd_popen = subprocess.Popen( cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env={"PATH": os.getenv("PATH")}) str_stdout = "".join(list(cmd_popen.stdout)).strip() str_stderr = "".join(list(cmd_popen.stderr)).strip() errno = cmd_popen.wait() if errno != 0: raise ExecError(" ".join(cmds), errno, str_stdout, str_stderr) return str_stdout def file_read(file_path): """ Read file and return string of file content. """ fd = open(file_path, 'r') content = fd.read() fd.close() return content class ExecError(Exception): def __init__(self, cmd, errno, stdout, stderr, *args, **kwargs): Exception.__init__(self, *args, **kwargs) self.cmd = cmd self.errno = errno self.stdout = stdout self.stderr = stderr def __str__(self): return "cmd: '%s', errno: %d, stdout: '%s', stderr: '%s'" % \ (self.cmd, self.errno, self.stdout, self.stderr) libstoragemgmt-1.2.3/plugin/hpsa/hpsa.py0000664000175000017500000006467512542267433015250 00000000000000# Copyright (C) 2015 Red Hat, Inc. 
# This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: Gris Ge import os import errno import re from lsm import ( IPlugin, Client, Capabilities, VERSION, LsmError, ErrorNumber, uri_parse, System, Pool, size_human_2_size_bytes, search_property, Volume, Disk) from lsm.plugin.hpsa.utils import cmd_exec, ExecError, file_read def _handle_errors(method): def _wrapper(*args, **kwargs): try: return method(*args, **kwargs) except LsmError: raise except KeyError as key_error: raise LsmError( ErrorNumber.PLUGIN_BUG, "Expected key missing from SmartArray hpssacli output:%s" % key_error) except ExecError as exec_error: if 'No controllers detected' in exec_error.stdout: raise LsmError( ErrorNumber.NOT_FOUND_SYSTEM, "No HP SmartArray deteceted by hpssacli.") else: raise LsmError(ErrorNumber.PLUGIN_BUG, str(exec_error)) except Exception as common_error: raise LsmError( ErrorNumber.PLUGIN_BUG, "Got unexpected error %s" % common_error) return _wrapper def _sys_status_of(hp_ctrl_status): """ Base on data of "hpssacli ctrl all show status" """ status_info = '' status = System.STATUS_UNKNOWN check_list = [ 'Controller Status', 'Cache Status', 'Battery/Capacitor Status'] for key_name in check_list: if key_name in hp_ctrl_status and hp_ctrl_status[key_name] != 'OK': # TODO(Gris Ge): Beg HP for possible values status = System.STATUS_OTHER status_info += hp_ctrl_status[key_name] if status != System.STATUS_OTHER: status = 
System.STATUS_OK return status, status_info def _parse_hpssacli_output(output): """ Got a output string of hpssacli to dictionary(nested). Skipped these line: 1. Starts with 'Note:' This is just a message right after controller. We don't neet it yet. 2. The 'Physical Drives' line. It should indented after 'Internal Drive Cage' like. If not ignored, we might got duplication line error. After ignored, it's phsycial disks will directly stored as key of 'Internal Drive Cage' dictionary. """ output_lines = [ l for l in output.split("\n") if l and not l.startswith('Note:') and not l.strip() == 'Physical Drives'] data = {} # Detemine indention level top_indention_level = sorted( set( len(line) - len(line.lstrip()) for line in output_lines))[0] indent_2_data = { top_indention_level: data } for line_num in range(len(output_lines)): cur_line = output_lines[line_num] if cur_line.strip() == 'None attached': continue if line_num + 1 == len(output_lines): nxt_line = '' else: nxt_line = output_lines[line_num + 1] cur_indent_count = len(cur_line) - len(cur_line.lstrip()) nxt_indent_count = len(nxt_line) - len(nxt_line.lstrip()) cur_line_splitted = cur_line.split(": ") cur_data_pointer = indent_2_data[cur_indent_count] if nxt_indent_count > cur_indent_count: nxt_line_splitted = nxt_line.split(": ") new_data = {} if cur_line.lstrip() not in cur_data_pointer: cur_data_pointer[cur_line.lstrip()] = new_data indent_2_data[nxt_indent_count] = new_data else: raise LsmError( ErrorNumber.PLUGIN_BUG, "_parse_hpssacli_output(): Found duplicate line %s" % cur_line) else: if len(cur_line_splitted) == 1: cur_data_pointer[cur_line.lstrip()] = None else: cur_data_pointer[cur_line_splitted[0].lstrip()] = \ ": ".join(cur_line_splitted[1:]).strip() return data def _hp_size_to_lsm(hp_size): """ HP Using 'TB, GB, MB, KB' and etc, for LSM, they are 'TiB' and etc. 
Return int of block bytes """ re_regex = re.compile("^([0-9.]+) +([EPTGMK])B") re_match = re_regex.match(hp_size) if re_match: return size_human_2_size_bytes( "%s%siB" % (re_match.group(1), re_match.group(2))) raise LsmError( ErrorNumber.PLUGIN_BUG, "_hp_size_to_lsm(): Got unexpected HP size string %s" % hp_size) def _pool_status_of(hp_array): """ Return (status, status_info) """ if hp_array['Status'] == 'OK': return Pool.STATUS_OK, '' else: # TODO(Gris Ge): Try degrade a RAID or fail a RAID. return Pool.STATUS_OTHER, hp_array['Status'] def _pool_id_of(sys_id, array_name): return "%s:%s" % (sys_id, array_name.replace(' ', '')) def _disk_type_of(hp_disk): disk_interface = hp_disk['Interface Type'] if disk_interface == 'SATA': return Disk.TYPE_SATA elif disk_interface == 'Solid State SATA': return Disk.TYPE_SSD elif disk_interface == 'SAS': return Disk.TYPE_SAS return Disk.TYPE_UNKNOWN def _disk_status_of(hp_disk, flag_free): # TODO(Gris Ge): Need more document or test for non-OK disks. if hp_disk['Status'] == 'OK': disk_status = Disk.STATUS_OK else: disk_status = Disk.STATUS_OTHER if flag_free: disk_status |= Disk.STATUS_FREE return disk_status _HP_RAID_LEVEL_CONV = { '0': Volume.RAID_TYPE_RAID0, # TODO(Gris Ge): Investigate whether HP has 4 disks RAID 1. # In LSM, that's RAID10. 
'1': Volume.RAID_TYPE_RAID1, '5': Volume.RAID_TYPE_RAID5, '6': Volume.RAID_TYPE_RAID6, '1+0': Volume.RAID_TYPE_RAID10, '50': Volume.RAID_TYPE_RAID50, '60': Volume.RAID_TYPE_RAID60, } _HP_VENDOR_RAID_LEVELS = ['1adm', '1+0adm'] _LSM_RAID_TYPE_CONV = dict( zip(_HP_RAID_LEVEL_CONV.values(), _HP_RAID_LEVEL_CONV.keys())) def _hp_raid_level_to_lsm(hp_ld): """ Based on this property: Fault Tolerance: 0/1/5/6/1+0 """ hp_raid_level = hp_ld['Fault Tolerance'] if hp_raid_level in _HP_VENDOR_RAID_LEVELS: return Volume.RAID_TYPE_OTHER return _HP_RAID_LEVEL_CONV.get(hp_raid_level, Volume.RAID_TYPE_UNKNOWN) def _lsm_raid_type_to_hp(raid_type): try: return _LSM_RAID_TYPE_CONV[raid_type] except KeyError: raise LsmError( ErrorNumber.NO_SUPPORT, "Not supported raid type %d" % raid_type) class SmartArray(IPlugin): _DEFAULT_BIN_PATHS = [ "/usr/sbin/hpssacli", "/opt/hp/hpssacli/bld/hpssacli"] def __init__(self): self._sacli_bin = None def _find_sacli(self): """ Try _DEFAULT_MDADM_BIN_PATHS """ for cur_path in SmartArray._DEFAULT_BIN_PATHS: if os.path.lexists(cur_path): self._sacli_bin = cur_path if not self._sacli_bin: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "SmartArray sacli is not installed correctly") @_handle_errors def plugin_register(self, uri, password, timeout, flags=Client.FLAG_RSVD): if os.geteuid() != 0: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "This plugin requires root privilege both daemon and client") uri_parsed = uri_parse(uri) self._sacli_bin = uri_parsed.get('parameters', {}).get('hpssacli') if not self._sacli_bin: self._find_sacli() self._sacli_exec(['version'], flag_convert=False) @_handle_errors def plugin_unregister(self, flags=Client.FLAG_RSVD): pass @_handle_errors def job_status(self, job_id, flags=Client.FLAG_RSVD): raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported yet") @_handle_errors def job_free(self, job_id, flags=Client.FLAG_RSVD): pass @_handle_errors def plugin_info(self, flags=Client.FLAG_RSVD): return "HP SmartArray Plugin", VERSION 
@_handle_errors def time_out_set(self, ms, flags=Client.FLAG_RSVD): raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported yet") @_handle_errors def time_out_get(self, flags=Client.FLAG_RSVD): raise LsmError(ErrorNumber.NO_SUPPORT, "Not supported yet") @_handle_errors def capabilities(self, system, flags=Client.FLAG_RSVD): cur_lsm_syss = self.systems() if system.id not in list(s.id for s in cur_lsm_syss): raise LsmError( ErrorNumber.NOT_FOUND_SYSTEM, "System not found") cap = Capabilities() cap.set(Capabilities.VOLUMES) cap.set(Capabilities.DISKS) cap.set(Capabilities.VOLUME_RAID_INFO) cap.set(Capabilities.POOL_MEMBER_INFO) cap.set(Capabilities.VOLUME_RAID_CREATE) return cap def _sacli_exec(self, sacli_cmds, flag_convert=True): """ If flag_convert is True, convert data into dict. """ sacli_cmds.insert(0, self._sacli_bin) try: output = cmd_exec(sacli_cmds) except OSError as os_error: if os_error.errno == errno.ENOENT: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "hpssacli binary '%s' is not exist or executable." 
% self._sacli_bin) else: raise if flag_convert: return _parse_hpssacli_output(output) else: return output @_handle_errors def systems(self, flags=0): """ Depend on command: hpssacli ctrl all show detail hpssacli ctrl all show status """ rc_lsm_syss = [] ctrl_all_show = self._sacli_exec( ["ctrl", "all", "show", "detail"]) ctrl_all_status = self._sacli_exec( ["ctrl", "all", "show", "status"]) for ctrl_name in ctrl_all_show.keys(): ctrl_data = ctrl_all_show[ctrl_name] sys_id = ctrl_data['Serial Number'] (status, status_info) = _sys_status_of(ctrl_all_status[ctrl_name]) plugin_data = "%s" % ctrl_data['Slot'] rc_lsm_syss.append( System(sys_id, ctrl_name, status, status_info, plugin_data)) return rc_lsm_syss @staticmethod def _hp_array_to_lsm_pool(hp_array, array_name, sys_id, ctrl_num): pool_id = _pool_id_of(sys_id, array_name) name = array_name elem_type = Pool.ELEMENT_TYPE_VOLUME | Pool.ELEMENT_TYPE_VOLUME_FULL unsupported_actions = 0 # TODO(Gris Ge): HP does not provide a precise number of bytes. 
free_space = _hp_size_to_lsm(hp_array['Unused Space']) total_space = free_space for key_name in hp_array.keys(): if key_name.startswith('Logical Drive'): total_space += _hp_size_to_lsm(hp_array[key_name]['Size']) (status, status_info) = _pool_status_of(hp_array) plugin_data = "%s:%s" % ( ctrl_num, array_name[len("Array: "):]) return Pool( pool_id, name, elem_type, unsupported_actions, total_space, free_space, status, status_info, sys_id, plugin_data) @_handle_errors def pools(self, search_key=None, search_value=None, flags=Client.FLAG_RSVD): """ Depend on command: hpssacli ctrl all show config detail """ lsm_pools = [] ctrl_all_conf = self._sacli_exec( ["ctrl", "all", "show", "config", "detail"]) for ctrl_data in ctrl_all_conf.values(): sys_id = ctrl_data['Serial Number'] ctrl_num = ctrl_data['Slot'] for key_name in ctrl_data.keys(): if key_name.startswith("Array:"): lsm_pools.append( SmartArray._hp_array_to_lsm_pool( ctrl_data[key_name], key_name, sys_id, ctrl_num)) return search_property(lsm_pools, search_key, search_value) @staticmethod def _hp_ld_to_lsm_vol(hp_ld, pool_id, sys_id, ctrl_num, array_num, hp_ld_name): ld_num = hp_ld_name[len("Logical Drive: "):] vpd83 = hp_ld['Unique Identifier'].lower() # No document or command output indicate block size # of volume. So we try to read from linux kernel, if failed # try 512 and roughly calculate the sector count. regex_match = re.compile("/dev/(sd[a-z]+)").search(hp_ld['Disk Name']) vol_name = hp_ld_name if regex_match: sd_name = regex_match.group(1) block_size = int(file_read( "/sys/block/%s/queue/logical_block_size" % sd_name)) num_of_blocks = int(file_read("/sys/block/%s/size" % sd_name)) vol_name += ": /dev/%s" % sd_name else: block_size = 512 num_of_blocks = int(_hp_size_to_lsm(hp_ld['Size']) / block_size) plugin_data = "%s:%s:%s" % (ctrl_num, array_num, ld_num) # HP SmartArray does not allow disabling volume. 
return Volume( vpd83, vol_name, vpd83, block_size, num_of_blocks, Volume.ADMIN_STATE_ENABLED, sys_id, pool_id, plugin_data) @_handle_errors def volumes(self, search_key=None, search_value=None, flags=Client.FLAG_RSVD): """ Depend on command: hpssacli ctrl all show config detail """ lsm_vols = [] ctrl_all_conf = self._sacli_exec( ["ctrl", "all", "show", "config", "detail"]) for ctrl_data in ctrl_all_conf.values(): ctrl_num = ctrl_data['Slot'] sys_id = ctrl_data['Serial Number'] for key_name in ctrl_data.keys(): if not key_name.startswith("Array:"): continue pool_id = _pool_id_of(sys_id, key_name) array_num = key_name[len('Array: '):] for array_key_name in ctrl_data[key_name].keys(): if not array_key_name.startswith("Logical Drive"): continue lsm_vols.append( SmartArray._hp_ld_to_lsm_vol( ctrl_data[key_name][array_key_name], pool_id, sys_id, ctrl_num, array_num, array_key_name)) return search_property(lsm_vols, search_key, search_value) @staticmethod def _hp_disk_to_lsm_disk(hp_disk, sys_id, ctrl_num, key_name, flag_free=False): disk_id = hp_disk['Serial Number'] disk_num = key_name[len("physicaldrive "):] disk_name = "%s %s" % (hp_disk['Model'], disk_num) disk_type = _disk_type_of(hp_disk) blk_size = int(hp_disk['Native Block Size']) blk_count = int(_hp_size_to_lsm(hp_disk['Size']) / blk_size) status = _disk_status_of(hp_disk, flag_free) plugin_data = "%s:%s" % (ctrl_num, disk_num) return Disk( disk_id, disk_name, disk_type, blk_size, blk_count, status, sys_id, plugin_data) @_handle_errors def disks(self, search_key=None, search_value=None, flags=Client.FLAG_RSVD): """ Depend on command: hpssacli ctrl all show config detail """ # TODO(Gris Ge): Need real test on spare disk. 
rc_lsm_disks = [] ctrl_all_conf = self._sacli_exec( ["ctrl", "all", "show", "config", "detail"]) for ctrl_data in ctrl_all_conf.values(): sys_id = ctrl_data['Serial Number'] ctrl_num = ctrl_data['Slot'] for key_name in ctrl_data.keys(): if key_name.startswith("Array:"): for array_key_name in ctrl_data[key_name].keys(): if array_key_name.startswith("physicaldrive"): rc_lsm_disks.append( SmartArray._hp_disk_to_lsm_disk( ctrl_data[key_name][array_key_name], sys_id, ctrl_num, array_key_name, flag_free=False)) if key_name == 'unassigned': for array_key_name in ctrl_data[key_name].keys(): if array_key_name.startswith("physicaldrive"): rc_lsm_disks.append( SmartArray._hp_disk_to_lsm_disk( ctrl_data[key_name][array_key_name], sys_id, ctrl_num, array_key_name, flag_free=True)) return search_property(rc_lsm_disks, search_key, search_value) @_handle_errors def volume_raid_info(self, volume, flags=Client.FLAG_RSVD): """ Depend on command: hpssacli ctrl slot=0 show config detail """ if not volume.plugin_data: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Ilegal input volume argument: missing plugin_data property") (ctrl_num, array_num, ld_num) = volume.plugin_data.split(":") ctrl_data = self._sacli_exec( ["ctrl", "slot=%s" % ctrl_num, "show", "config", "detail"] ).values()[0] disk_count = 0 strip_size = Volume.STRIP_SIZE_UNKNOWN stripe_size = Volume.OPT_IO_SIZE_UNKNOWN raid_type = Volume.RAID_TYPE_UNKNOWN for key_name in ctrl_data.keys(): if key_name != "Array: %s" % array_num: continue for array_key_name in ctrl_data[key_name].keys(): if array_key_name == "Logical Drive: %s" % ld_num: hp_ld = ctrl_data[key_name][array_key_name] raid_type = _hp_raid_level_to_lsm(hp_ld) strip_size = _hp_size_to_lsm(hp_ld['Strip Size']) stripe_size = _hp_size_to_lsm(hp_ld['Full Stripe Size']) elif array_key_name.startswith("physicaldrive"): hp_disk = ctrl_data[key_name][array_key_name] if hp_disk['Drive Type'] == 'Data Drive': disk_count += 1 if disk_count == 0: if strip_size == 
Volume.STRIP_SIZE_UNKNOWN: raise LsmError( ErrorNumber.PLUGIN_BUG, "volume_raid_info(): Got logical drive %s entry, " % ld_num + "but no physicaldrive entry: %s" % ctrl_data.items()) raise LsmError( ErrorNumber.NOT_FOUND_VOLUME, "Volume not found") return [raid_type, strip_size, disk_count, strip_size, stripe_size] @_handle_errors def pool_member_info(self, pool, flags=Client.FLAG_RSVD): """ Depend on command: hpssacli ctrl slot=0 show config detail """ if not pool.plugin_data: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Ilegal input volume argument: missing plugin_data property") (ctrl_num, array_num) = pool.plugin_data.split(":") ctrl_data = self._sacli_exec( ["ctrl", "slot=%s" % ctrl_num, "show", "config", "detail"] ).values()[0] disk_ids = [] raid_type = Volume.RAID_TYPE_UNKNOWN for key_name in ctrl_data.keys(): if key_name == "Array: %s" % array_num: for array_key_name in ctrl_data[key_name].keys(): if array_key_name.startswith("Logical Drive: ") and \ raid_type == Volume.RAID_TYPE_UNKNOWN: raid_type = _hp_raid_level_to_lsm( ctrl_data[key_name][array_key_name]) elif array_key_name.startswith("physicaldrive"): hp_disk = ctrl_data[key_name][array_key_name] if hp_disk['Drive Type'] == 'Data Drive': disk_ids.append(hp_disk['Serial Number']) break if len(disk_ids) == 0: raise LsmError( ErrorNumber.NOT_FOUND_POOL, "Pool not found") return raid_type, Pool.MEMBER_TYPE_DISK, disk_ids def _vrc_cap_get(self, ctrl_num): supported_raid_types = [ Volume.RAID_TYPE_RAID0, Volume.RAID_TYPE_RAID1, Volume.RAID_TYPE_RAID5, Volume.RAID_TYPE_RAID50, Volume.RAID_TYPE_RAID10] supported_strip_sizes = [ 8 * 1024, 16 * 1024, 32 * 1024, 64 * 1024, 128 * 1024, 256 * 1024, 512 * 1024, 1024 * 1024] ctrl_conf = self._sacli_exec([ "ctrl", "slot=%s" % ctrl_num, "show", "config", "detail"] ).values()[0] if 'RAID 6 (ADG) Status' in ctrl_conf and \ ctrl_conf['RAID 6 (ADG) Status'] == 'Enabled': supported_raid_types.extend( [Volume.RAID_TYPE_RAID6, Volume.RAID_TYPE_RAID60]) return 
supported_raid_types, supported_strip_sizes @_handle_errors def volume_raid_create_cap_get(self, system, flags=Client.FLAG_RSVD): """ Depends on this command: hpssacli ctrl slot=0 show config detail All hpsa support RAID 1, 10, 5, 50. If "RAID 6 (ADG) Status: Enabled", it will support RAID 6 and 60. For HP tribile mirror(RAID 1adm and RAID10adm), LSM does support that yet. No command output or document indication special or exceptional support of strip size, assuming all hpsa cards support documented strip sizes. """ if not system.plugin_data: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Ilegal input system argument: missing plugin_data property") return self._vrc_cap_get(system.plugin_data) @_handle_errors def volume_raid_create(self, name, raid_type, disks, strip_size, flags=Client.FLAG_RSVD): """ Depends on these commands: 1. Create LD hpssacli ctrl slot=0 create type=ld \ drives=1i:1:13,1i:1:14 size=max raid=1+0 ss=64 2. Find out the system ID. 3. Find out the pool fist disk belong. hpssacli ctrl slot=0 pd 1i:1:13 show 4. List all volumes for this new pool. self.volumes(search_key='pool_id', search_value=pool_id) The 'name' argument will be ignored. TODO(Gris Ge): These code only tested for creating 1 disk RAID 0. 
""" hp_raid_level = _lsm_raid_type_to_hp(raid_type) hp_disk_ids = [] ctrl_num = None for disk in disks: if not disk.plugin_data: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Illegal input disks argument: missing plugin_data " "property") (cur_ctrl_num, hp_disk_id) = disk.plugin_data.split(':', 1) if ctrl_num and cur_ctrl_num != ctrl_num: raise LsmError( ErrorNumber.INVALID_ARGUMENT, "Illegal input disks argument: disks are not from the " "same controller/system.") ctrl_num = cur_ctrl_num hp_disk_ids.append(hp_disk_id) cmds = [ "ctrl", "slot=%s" % ctrl_num, "create", "type=ld", "drives=%s" % ','.join(hp_disk_ids), 'size=max', 'raid=%s' % hp_raid_level] if strip_size != Volume.VCR_STRIP_SIZE_DEFAULT: cmds.append("ss=%d" % int(strip_size / 1024)) try: self._sacli_exec(cmds, flag_convert=False) except ExecError: # Check whether disk is free requested_disk_ids = [d.id for d in disks] for cur_disk in self.disks(): if cur_disk.id in requested_disk_ids and \ not cur_disk.status & Disk.STATUS_FREE: raise LsmError( ErrorNumber.DISK_NOT_FREE, "Disk %s is not in STATUS_FREE state" % cur_disk.id) # Check whether got unsupported raid type or strip size supported_raid_types, supported_strip_sizes = \ self._vrc_cap_get(ctrl_num) if raid_type not in supported_raid_types: raise LsmError( ErrorNumber.NO_SUPPORT, "Provided raid_type is not supported") if strip_size != Volume.VCR_STRIP_SIZE_DEFAULT and \ strip_size not in supported_strip_sizes: raise LsmError( ErrorNumber.NO_SUPPORT, "Provided strip_size is not supported") raise # Find out the system id to gernerate pool_id sys_output = self._sacli_exec( ['ctrl', "slot=%s" % ctrl_num, 'show']) sys_id = sys_output.values()[0]['Serial Number'] # API code already checked empty 'disks', we will for sure get # valid 'ctrl_num' and 'hp_disk_ids'. 
pd_output = self._sacli_exec( ['ctrl', "slot=%s" % ctrl_num, 'pd', hp_disk_ids[0], 'show']) if pd_output.values()[0].keys()[0].lower().startswith("array "): hp_array_id = pd_output.values()[0].keys()[0][len("array "):] hp_array_id = "Array:%s" % hp_array_id else: raise LsmError( ErrorNumber.PLUGIN_BUG, "volume_raid_create(): Failed to find out the array ID of " "new array: %s" % pd_output.items()) pool_id = _pool_id_of(sys_id, hp_array_id) lsm_vols = self.volumes(search_key='pool_id', search_value=pool_id) if len(lsm_vols) != 1: raise LsmError( ErrorNumber.PLUGIN_BUG, "volume_raid_create(): Got unexpected count(not 1) of new " "volumes: %s" % lsm_vols) return lsm_vols[0] libstoragemgmt-1.2.3/plugin/smispy/0000775000175000017500000000000012542455463014374 500000000000000libstoragemgmt-1.2.3/plugin/smispy/smispy_lsmplugin0000775000175000017500000000232412537737032017660 00000000000000#!/usr/bin/env python2 # Copyright (C) 2011-2013 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: tasleson import sys import syslog import traceback try: from lsm import PluginRunner from lsm.plugin.smispy.smis import Smis if __name__ == '__main__': PluginRunner(Smis, sys.argv).run() except Exception: #This should be quite rare, but when it does happen this is pretty #key in understanding what happened, especially when it happens when #running from the daemon. 
msg = str(traceback.format_exc()) syslog.syslog(syslog.LOG_ERR, msg) sys.stderr.write(msg) sys.exit(1) libstoragemgmt-1.2.3/plugin/smispy/smis_cap.py0000664000175000017500000003214012537737032016463 00000000000000# Copyright (C) 2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . from lsm import Capabilities, LsmError, ErrorNumber import dmtf from smis_common import SmisCommon MASK_TYPE_NO_SUPPORT = 0 MASK_TYPE_MASK = 1 MASK_TYPE_GROUP = 2 def _rs_supported_capabilities(smis_common, system_id, cap): """ Interrogate the supported features of the replication service """ cim_rs = smis_common.cim_rs_of_sys_id(system_id, raise_error=False) if cim_rs: rs_cap = smis_common.Associators( cim_rs.path, AssocClass='CIM_ElementCapabilities', ResultClass='CIM_ReplicationServiceCapabilities', PropertyList=['SupportedReplicationTypes', 'SupportedAsynchronousActions', 'SupportedSynchronousActions'])[0] s_rt = rs_cap['SupportedReplicationTypes'] async_actions = rs_cap['SupportedAsynchronousActions'] sync_actions = rs_cap['SupportedSynchronousActions'] if dmtf.REPLICA_CAP_ACTION_CREATE_ELEMENT in async_actions or \ dmtf.REPLICA_CAP_ACTION_CREATE_ELEMENT in sync_actions: cap.set(Capabilities.VOLUME_REPLICATE) else: return if dmtf.REPLICA_CAP_TYPE_SYNC_SNAPSHOT_LOCAL in s_rt or \ dmtf.REPLICA_CAP_TYPE_ASYNC_SNAPSHOT_LOCAL in s_rt: cap.set(Capabilities.VOLUME_REPLICATE_CLONE) if dmtf.REPLICA_CAP_TYPE_SYNC_CLONE_LOCAL in 
s_rt or \ dmtf.REPLICA_CAP_TYPE_ASYNC_CLONE_LOCAL in s_rt: cap.set(Capabilities.VOLUME_REPLICATE_COPY) else: # Try older storage configuration service cim_scs = smis_common.cim_scs_of_sys_id(system_id, raise_error=False) if cim_scs: cim_sc_cap = smis_common.Associators( cim_scs.path, AssocClass='CIM_ElementCapabilities', ResultClass='CIM_StorageConfigurationCapabilities', PropertyList=['SupportedCopyTypes'])[0] if cim_sc_cap is not None and 'SupportedCopyTypes' in cim_sc_cap: sct = cim_sc_cap['SupportedCopyTypes'] if sct and len(sct): cap.set(Capabilities.VOLUME_REPLICATE) if dmtf.ST_CONF_CAP_COPY_TYPE_UNSYNC_ASSOC in sct: cap.set(Capabilities.VOLUME_REPLICATE_CLONE) if dmtf.ST_CONF_CAP_COPY_TYPE_UNSYNC_UNASSOC in sct: cap.set(Capabilities.VOLUME_REPLICATE_COPY) def _bsp_cap_set(smis_common, system_id, cap): """ Set capabilities for these methods: volumes() volume_create() volume_resize() volume_delete() """ # CIM_StorageConfigurationService is optional. cim_scs = smis_common.cim_scs_of_sys_id(system_id, raise_error=False) if cim_scs is None: return # These methods are mandatory for CIM_StorageConfigurationService: # CreateOrModifyElementFromStoragePool() # ReturnToStoragePool() # But SNIA never defined which function of # CreateOrModifyElementFromStoragePool() is mandatory. # Hence we check CIM_StorageConfigurationCapabilities # which is mandatory if CIM_StorageConfigurationService is supported. 
cim_scs_cap = smis_common.Associators( cim_scs.path, AssocClass='CIM_ElementCapabilities', ResultClass='CIM_StorageConfigurationCapabilities', PropertyList=['SupportedAsynchronousActions', 'SupportedSynchronousActions', 'SupportedStorageElementTypes'])[0] element_types = cim_scs_cap['SupportedStorageElementTypes'] sup_actions = [] if 'SupportedSynchronousActions' in cim_scs_cap: if cim_scs_cap['SupportedSynchronousActions']: sup_actions.extend(cim_scs_cap['SupportedSynchronousActions']) if 'SupportedAsynchronousActions' in cim_scs_cap: if cim_scs_cap['SupportedAsynchronousActions']: sup_actions.extend(cim_scs_cap['SupportedAsynchronousActions']) if dmtf.SCS_CAP_SUP_ST_VOLUME in element_types or \ dmtf.SCS_CAP_SUP_THIN_ST_VOLUME in element_types: cap.set(Capabilities.VOLUMES) if dmtf.SCS_CAP_SUP_THIN_ST_VOLUME in element_types: cap.set(Capabilities.VOLUME_THIN) if dmtf.SCS_CAP_VOLUME_CREATE in sup_actions: cap.set(Capabilities.VOLUME_CREATE) if dmtf.SCS_CAP_VOLUME_DELETE in sup_actions: cap.set(Capabilities.VOLUME_DELETE) if dmtf.SCS_CAP_VOLUME_MODIFY in sup_actions: cap.set(Capabilities.VOLUME_RESIZE) return def _disk_cap_set(smis_common, cim_sys_path, cap): if not smis_common.profile_check(SmisCommon.SNIA_DISK_LITE_PROFILE, SmisCommon.SMIS_SPEC_VER_1_4, raise_error=False): return cap.set(Capabilities.DISKS) return def _group_mask_map_cap_set(smis_common, cim_sys_path, cap): """ We set caps for these methods recording to 1.5+ Group M&M profile: access_groups() access_groups_granted_to_volume() volumes_accessible_by_access_group() access_group_initiator_add() access_group_initiator_delete() volume_mask() volume_unmask() access_group_create() access_group_delete() """ # These are mandatory in SNIA SMI-S. # We are not in the position of SNIA SMI-S certification. 
cap.set(Capabilities.ACCESS_GROUPS) cap.set(Capabilities.ACCESS_GROUPS_GRANTED_TO_VOLUME) cap.set(Capabilities.VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP) cap.set(Capabilities.VOLUME_MASK) if fc_tgt_is_supported(smis_common): cap.set(Capabilities.ACCESS_GROUP_INITIATOR_ADD_WWPN) cap.set(Capabilities.ACCESS_GROUP_CREATE_WWPN) if iscsi_tgt_is_supported(smis_common): cap.set(Capabilities.ACCESS_GROUP_INITIATOR_ADD_ISCSI_IQN) cap.set(Capabilities.ACCESS_GROUP_CREATE_ISCSI_IQN) # RemoveMembers is also mandatory cap.set(Capabilities.ACCESS_GROUP_INITIATOR_DELETE) cim_gmm_cap_pros = [ 'SupportedAsynchronousActions', 'SupportedSynchronousActions', 'SupportedDeviceGroupFeatures'] cim_gmm_cap = smis_common.Associators( cim_sys_path, AssocClass='CIM_ElementCapabilities', ResultClass='CIM_GroupMaskingMappingCapabilities', PropertyList=cim_gmm_cap_pros)[0] # if empty dev group in spc is allowed, RemoveMembers() is enough # to do volume_unmask(). RemoveMembers() is mandatory. if dmtf.GMM_CAP_DEV_MG_ALLOW_EMPTY_W_SPC in \ cim_gmm_cap['SupportedDeviceGroupFeatures']: cap.set(Capabilities.VOLUME_UNMASK) # DeleteMaskingView() is optional, this is required by volume_unmask() # when empty dev group in spc not allowed. 
elif ((dmtf.GMM_CAP_DELETE_SPC in cim_gmm_cap['SupportedSynchronousActions']) or (dmtf.GMM_CAP_DELETE_SPC in cim_gmm_cap['SupportedAsynchronousActions'])): cap.set(Capabilities.VOLUME_UNMASK) # DeleteGroup is optional, this is required by access_group_delete() if ((dmtf.GMM_CAP_DELETE_GROUP in cim_gmm_cap['SupportedSynchronousActions']) or (dmtf.GMM_CAP_DELETE_GROUP in cim_gmm_cap['SupportedAsynchronousActions'])): cap.set(Capabilities.ACCESS_GROUP_DELETE) return None def _mask_map_cap_set(smis_common, cim_sys_path, cap): """ In SNIA SMI-S 1.4rev6 'Masking and Mapping' profile: CIM_ControllerConfigurationService is mandatory and it's ExposePaths() and HidePaths() are mandatory """ if not smis_common.profile_check(SmisCommon.SNIA_MASK_PROFILE, SmisCommon.SMIS_SPEC_VER_1_4, raise_error=False): return cap.set(Capabilities.ACCESS_GROUPS) cap.set(Capabilities.VOLUME_MASK) cap.set(Capabilities.VOLUME_UNMASK) cap.set(Capabilities.ACCESS_GROUP_INITIATOR_DELETE) cap.set(Capabilities.ACCESS_GROUPS_GRANTED_TO_VOLUME) cap.set(Capabilities.VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP) # EMC VNX does not support CreateStorageHardwareID for iSCSI # and require WWNN for WWPN. Hence both are not supported. 
if cim_sys_path.classname == 'Clar_StorageSystem': return if fc_tgt_is_supported(smis_common): cap.set(Capabilities.ACCESS_GROUP_INITIATOR_ADD_WWPN) if iscsi_tgt_is_supported(smis_common): cap.set(Capabilities.ACCESS_GROUP_INITIATOR_ADD_ISCSI_IQN) return def _tgt_cap_set(smis_common, cim_sys_path, cap): # LSI MegaRAID actually not support FC Target and iSCSI target, # They expose empty list of CIM_FCPort if cim_sys_path.classname == 'LSIESG_MegaRAIDHBA': return flag_fc_support = fc_tgt_is_supported(smis_common) flag_iscsi_support = iscsi_tgt_is_supported(smis_common) if flag_fc_support or flag_iscsi_support: cap.set(Capabilities.TARGET_PORTS) return def mask_type(smis_common, raise_error=False): """ Return MASK_TYPE_NO_SUPPORT, MASK_TYPE_MASK or MASK_TYPE_GROUP if 'Group Masking and Mapping' profile is supported, return MASK_TYPE_GROUP If raise_error == False, just return MASK_TYPE_NO_SUPPORT or, raise NO_SUPPORT error. """ if smis_common.profile_check(SmisCommon.SNIA_GROUP_MASK_PROFILE, SmisCommon.SMIS_SPEC_VER_1_5, raise_error=False): return MASK_TYPE_GROUP if smis_common.profile_check(SmisCommon.SNIA_MASK_PROFILE, SmisCommon.SMIS_SPEC_VER_1_4, raise_error=False): return MASK_TYPE_MASK if raise_error: raise LsmError(ErrorNumber.NO_SUPPORT, "Target SMI-S provider does not support " "%s version %s or %s version %s" % (SmisCommon.SNIA_MASK_PROFILE, SmisCommon.SMIS_SPEC_VER_1_4, SmisCommon.SNIA_GROUP_MASK_PROFILE, SmisCommon.SMIS_SPEC_VER_1_5)) return MASK_TYPE_NO_SUPPORT def fc_tgt_is_supported(smis_common): """ Return True if FC Target Port 1.4+ profile is supported. """ flag_fc_support = smis_common.profile_check( SmisCommon.SNIA_FC_TGT_PORT_PROFILE, SmisCommon.SMIS_SPEC_VER_1_4, raise_error=False) # One more check for NetApp Typo: # NetApp: 'FC Target Port' # SMI-S: 'FC Target Ports' # Bug reported. 
if not flag_fc_support: flag_fc_support = smis_common.profile_check( 'FC Target Port', SmisCommon.SMIS_SPEC_VER_1_4, raise_error=False) if flag_fc_support: return True else: return False def iscsi_tgt_is_supported(smis_common): """ Return True if FC Target Port 1.1+ profile is supported. We use CIM_iSCSIProtocolEndpoint as it's a start point we are using in our code of target_ports(). """ if smis_common.profile_check(SmisCommon.SNIA_ISCSI_TGT_PORT_PROFILE, SmisCommon.SMIS_SPEC_VER_1_1, raise_error=False): return True return False def multi_sys_is_supported(smis_common): """ Return True if Multiple ComputerSystem 1.1+ profile is supported. Return False else. """ flag_multi_sys_support = smis_common.profile_check( SmisCommon.SNIA_MULTI_SYS_PROFILE, SmisCommon.SMIS_SPEC_VER_1_1, raise_error=False) if flag_multi_sys_support: return True else: return False def get(smis_common, cim_sys, system): cap = Capabilities() if smis_common.is_netappe(): _rs_supported_capabilities(smis_common, system.id, cap) #TODO We need to investigate why our interrogation code doesn't #work. #The array is telling us one thing, but when we try to use it, it #doesn't work return cap # 'Block Services Package' profile _bsp_cap_set(smis_common, system.id, cap) # 'Disk Drive Lite' profile _disk_cap_set(smis_common, cim_sys.path, cap) # 'Masking and Mapping' and 'Group Masking and Mapping' profiles mt = mask_type(smis_common) if cim_sys.path.classname == 'Clar_StorageSystem': mt = MASK_TYPE_MASK if mask_type == MASK_TYPE_GROUP: _group_mask_map_cap_set(smis_common, cim_sys.path, cap) else: _mask_map_cap_set(smis_common, cim_sys.path, cap) # 'FC Target Ports' and 'iSCSI Target Ports' profiles _tgt_cap_set(smis_common, cim_sys.path, cap) _rs_supported_capabilities(smis_common, system.id, cap) return cap libstoragemgmt-1.2.3/plugin/smispy/smis_sys.py0000664000175000017500000001207712537737032016545 00000000000000## Copyright (C) 2014 Red Hat, Inc. 
# This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: Gris Ge from utils import merge_list import dmtf from lsm import System, LsmError, ErrorNumber def cim_sys_id_pros(): """ Return the property of CIM_ComputerSystem required to generate lsm.System.id """ return ['Name'] def sys_id_of_cim_sys(cim_sys): if 'Name' in cim_sys: return cim_sys['Name'] else: raise LsmError( ErrorNumber.PLUGIN_BUG, "sys_id_of_cim_sys(): Got a CIM_ComputerSystem does not have " "'Name' property: %s, %s" % (cim_sys.items(), cim_sys.path)) def sys_id_of_cim_vol(cim_vol): if 'SystemName' in cim_vol: return cim_vol['SystemName'] else: raise LsmError( ErrorNumber.PLUGIN_BUG, "sys_id_of_cim_vol(): Got a CIM_StorageVolume does not have " "'SystemName' property: %s, %s" % (cim_vol.items(), cim_vol.path)) def root_cim_sys(smis_common, property_list=None): """ Use this association to find out the root CIM_ComputerSystem: CIM_RegisteredProfile # Root Profile('Array') in interop | | CIM_ElementConformsToProfile v CIM_ComputerSystem # vendor namespace """ id_pros = cim_sys_id_pros() if property_list is None: property_list = id_pros else: property_list = merge_list(property_list, id_pros) cim_syss = [] if smis_common.is_megaraid(): cim_syss = smis_common.EnumerateInstances( 'CIM_ComputerSystem', PropertyList=property_list) else: cim_syss = smis_common.Associators( smis_common.root_blk_cim_rp.path, ResultClass='CIM_ComputerSystem', 
AssocClass='CIM_ElementConformsToProfile', PropertyList=property_list) if len(cim_syss) == 0: raise LsmError(ErrorNumber.NO_SUPPORT, "Current SMI-S provider does not provide " "the root CIM_ComputerSystem associated " "to 'Array' CIM_RegisteredProfile.") # System URI Filtering if smis_common.system_list: needed_cim_syss = [] for cim_sys in cim_syss: if sys_id_of_cim_sys(cim_sys) in smis_common.system_list: needed_cim_syss.extend([cim_sys]) return needed_cim_syss else: return cim_syss def cim_sys_pros(): """ Return a list of properties required to create a LSM System """ cim_sys_properties = cim_sys_id_pros() cim_sys_properties.extend(['ElementName', 'OperationalStatus']) return cim_sys_properties _LSM_SYS_OP_STATUS_CONV = { dmtf.OP_STATUS_UNKNOWN: System.STATUS_UNKNOWN, dmtf.OP_STATUS_OK: System.STATUS_OK, dmtf.OP_STATUS_ERROR: System.STATUS_ERROR, dmtf.OP_STATUS_DEGRADED: System.STATUS_DEGRADED, dmtf.OP_STATUS_NON_RECOVERABLE_ERROR: System.STATUS_ERROR, dmtf.OP_STATUS_PREDICTIVE_FAILURE: System.STATUS_PREDICTIVE_FAILURE, dmtf.OP_STATUS_SUPPORTING_ENTITY_IN_ERROR: System.STATUS_ERROR, } def _sys_status_of_cim_sys(cim_sys): """ Convert CIM_ComputerSystem['OperationalStatus'] """ if 'OperationalStatus' not in cim_sys: raise LsmError( ErrorNumber.PLUGIN_BUG, "sys_status_of_cim_sys(): Got a CIM_ComputerSystem with no " "OperationalStatus: %s, %s" % (cim_sys.items(), cim_sys.path)) return dmtf.op_status_list_conv( _LSM_SYS_OP_STATUS_CONV, cim_sys['OperationalStatus'], System.STATUS_UNKNOWN, System.STATUS_OTHER) def cim_sys_to_lsm_sys(cim_sys): status = System.STATUS_UNKNOWN status_info = '' if 'OperationalStatus' in cim_sys: (status, status_info) = _sys_status_of_cim_sys(cim_sys) sys_id = sys_id_of_cim_sys(cim_sys) sys_name = cim_sys['ElementName'] return System(sys_id, sys_name, status, status_info) def cim_sys_of_sys_id(smis_common, sys_id, property_list=None): """ Find out the CIM_ComputerSystem for given lsm.System.id using root_cim_sys() """ id_pros = 
cim_sys_id_pros() if property_list is None: property_list = id_pros else: property_list = merge_list(property_list, id_pros) cim_syss = root_cim_sys(smis_common, property_list) for cim_sys in cim_syss: if sys_id_of_cim_sys(cim_sys) == sys_id: return cim_sys raise LsmError( ErrorNumber.NOT_FOUND_SYSTEM, "Not found System") libstoragemgmt-1.2.3/plugin/smispy/smis_pool.py0000664000175000017500000002355612537754620016706 00000000000000## Copyright (C) 2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: Gris Ge from utils import merge_list, path_str_to_cim_path, cim_path_to_path_str import dmtf from lsm import LsmError, ErrorNumber, Pool def cim_pools_of_cim_sys_path(smis_common, cim_sys_path, property_list=None): """ Use this association to get a list of CIM_StoragePool: CIM_ComputerSystem | | (CIM_HostedStoragePool) | v CIM_StoragePool As 'Block Services Package' is mandatory for 'Array' profile which already checked by plugin_register(), we don't do any profile check here. Primordial pool will be eliminated from return list. 
These pools will be eliminated also: * Spare pool with CIM_StoragePool['Usage'] == dmtf.POOL_USAGE_SPARE * IBM ArrayPool(IBMTSDS_ArrayPool) * IBM ArraySitePool(IBMTSDS_ArraySitePool) """ cim_pools = [] if property_list is None: property_list = ['Primordial', 'Usage'] else: property_list = merge_list(property_list, ['Primordial', 'Usage']) cim_pools = smis_common.Associators( cim_sys_path, AssocClass='CIM_HostedStoragePool', ResultClass='CIM_StoragePool', PropertyList=property_list) rc = [] for cim_pool in cim_pools: if 'Primordial' in cim_pool and cim_pool['Primordial']: continue if 'Usage' in cim_pool and cim_pool['Usage'] == dmtf.POOL_USAGE_SPARE: continue # Skip IBM ArrayPool and ArraySitePool # ArrayPool is holding RAID info. # ArraySitePool is holding 8 disks. Predefined by array. # ArraySite --(1to1 map) --> Array --(1to1 map)--> Rank # By design when user get a ELEMENT_TYPE_POOL only pool, # user can assume he/she can allocate spaces from that pool # to create a new pool with ELEMENT_TYPE_VOLUME or # ELEMENT_TYPE_FS ability. # If we expose them out, we will have two kind of pools # (ArrayPool and ArraySitePool) having element_type & # ELEMENT_TYPE_POOL, but none of them can create a # ELEMENT_TYPE_VOLUME pool. # Only RankPool can create a ELEMENT_TYPE_VOLUME pool. # We are trying to hide the detail to provide a simple # abstraction. if cim_pool.classname == 'IBMTSDS_ArrayPool' or \ cim_pool.classname == 'IBMTSDS_ArraySitePool': continue rc.append(cim_pool) return rc def cim_pool_id_pros(): """ Return a list of CIM_StoragePool properties required to generate lsm.Pool.id """ return ['InstanceID'] def pool_id_of_cim_pool(cim_pool): if 'InstanceID' in cim_pool: return cim_pool['InstanceID'] else: raise LsmError( ErrorNumber.PLUGIN_BUG, "pool_id_of_cim_pool(): Got CIM_StoragePool with no 'InstanceID' " "property: %s, %s" % (cim_pool.items(), cim_pool.path)) def cim_pool_pros(): """ Return a list of CIM_StoragePool properties required to generate lsm.Pool. 
""" pool_pros = cim_pool_id_pros() pool_pros.extend(['ElementName', 'TotalManagedSpace', 'RemainingManagedSpace', 'Usage', 'OperationalStatus']) return pool_pros def _pool_element_type(smis_common, cim_pool): """ Return a set (Pool.element_type, Pool.unsupported) Using CIM_StorageConfigurationCapabilities 'SupportedStorageElementFeatures' and 'SupportedStorageElementTypes' property. For MegaRAID, just return (Pool.ELEMENT_TYPE_VOLUME, 0) """ if smis_common.is_megaraid(): return Pool.ELEMENT_TYPE_VOLUME | Pool.ELEMENT_TYPE_VOLUME_FULL, 0 element_type = 0 unsupported = 0 # check whether current pool support create volume or not. cim_sccs = smis_common.Associators( cim_pool.path, AssocClass='CIM_ElementCapabilities', ResultClass='CIM_StorageConfigurationCapabilities', PropertyList=['SupportedStorageElementFeatures', 'SupportedStorageElementTypes']) # Associate StorageConfigurationCapabilities to StoragePool # is experimental in SNIA 1.6rev4, Block Book PDF Page 68. # Section 5.1.6 StoragePool, StorageVolume and LogicalDisk # Manipulation, Figure 9 - Capabilities Specific to a StoragePool if len(cim_sccs) == 1: cim_scc = cim_sccs[0] if 'SupportedStorageElementFeatures' in cim_scc: supported_features = cim_scc['SupportedStorageElementFeatures'] supported_types = cim_scc['SupportedStorageElementTypes'] if dmtf.SUPPORT_VOL_CREATE in supported_features: element_type = Pool.ELEMENT_TYPE_VOLUME if dmtf.ELEMENT_THIN_VOLUME in supported_types: element_type |= Pool.ELEMENT_TYPE_VOLUME_THIN if dmtf.ELEMENT_THICK_VOLUME in supported_types: element_type |= Pool.ELEMENT_TYPE_VOLUME_FULL if dmtf.SUPPORT_ELEMENT_EXPAND not in supported_features: unsupported |= Pool.UNSUPPORTED_VOLUME_GROW if dmtf.SUPPORT_ELEMENT_REDUCE not in supported_features: unsupported |= Pool.UNSUPPORTED_VOLUME_SHRINK else: # IBM DS 8000 does not support StorageConfigurationCapabilities # per pool yet. They has been informed. Before fix, use a quick # workaround. 
# TODO: Currently, we don't have a way to detect # Pool.ELEMENT_TYPE_POOL # but based on knowing definition of each vendor. if cim_pool.classname == 'IBMTSDS_VirtualPool' or \ cim_pool.classname == 'IBMTSDS_ExtentPool': element_type = Pool.ELEMENT_TYPE_VOLUME elif cim_pool.classname == 'IBMTSDS_RankPool': element_type = Pool.ELEMENT_TYPE_POOL elif cim_pool.classname == 'LSIESG_StoragePool': element_type = Pool.ELEMENT_TYPE_VOLUME if 'Usage' in cim_pool: usage = cim_pool['Usage'] if usage == dmtf.POOL_USAGE_UNRESTRICTED: element_type |= Pool.ELEMENT_TYPE_VOLUME if usage == dmtf.POOL_USAGE_RESERVED_FOR_SYSTEM or \ usage > dmtf.POOL_USAGE_DELTA: element_type |= Pool.ELEMENT_TYPE_SYS_RESERVED if usage == dmtf.POOL_USAGE_DELTA: # We blitz all the other elements types for this designation element_type = Pool.ELEMENT_TYPE_DELTA return element_type, unsupported _LSM_POOL_OP_STATUS_CONV = { dmtf.OP_STATUS_OK: Pool.STATUS_OK, dmtf.OP_STATUS_ERROR: Pool.STATUS_ERROR, dmtf.OP_STATUS_DEGRADED: Pool.STATUS_OK | Pool.STATUS_DEGRADED, dmtf.OP_STATUS_NON_RECOVERABLE_ERROR: Pool.STATUS_ERROR, dmtf.OP_STATUS_SUPPORTING_ENTITY_IN_ERROR: Pool.STATUS_ERROR, } def _pool_status_of_cim_pool(dmtf_op_status_list): """ Convert CIM_StoragePool['OperationalStatus'] to LSM """ return dmtf.op_status_list_conv( _LSM_POOL_OP_STATUS_CONV, dmtf_op_status_list, Pool.STATUS_UNKNOWN, Pool.STATUS_OTHER) def cim_pool_to_lsm_pool(smis_common, cim_pool, system_id): """ Return a Pool object base on information of cim_pool. Assuming cim_pool already holding correct properties. 
""" status_info = '' pool_id = pool_id_of_cim_pool(cim_pool) name = '' total_space = Pool.TOTAL_SPACE_NOT_FOUND free_space = Pool.FREE_SPACE_NOT_FOUND status = Pool.STATUS_OK if 'ElementName' in cim_pool: name = cim_pool['ElementName'] if 'TotalManagedSpace' in cim_pool: total_space = cim_pool['TotalManagedSpace'] if 'RemainingManagedSpace' in cim_pool: free_space = cim_pool['RemainingManagedSpace'] if 'OperationalStatus' in cim_pool: (status, status_info) = _pool_status_of_cim_pool( cim_pool['OperationalStatus']) element_type, unsupported = _pool_element_type(smis_common, cim_pool) plugin_data = cim_path_to_path_str(cim_pool.path) return Pool(pool_id, name, element_type, unsupported, total_space, free_space, status, status_info, system_id, plugin_data) def lsm_pool_to_cim_pool_path(smis_common, lsm_pool): """ Convert lsm.Pool to CIMInstanceName of CIM_StoragePool using lsm.Pool.plugin_data """ if not lsm_pool.plugin_data: raise LsmError( ErrorNumber.PLUGIN_BUG, "Got lsm.Pool instance with empty plugin_data") if smis_common.system_list and \ lsm_pool.system_id not in smis_common.system_list: raise LsmError( ErrorNumber.NOT_FOUND_SYSTEM, "System filtered in URI") return path_str_to_cim_path(lsm_pool.plugin_data) def pool_id_of_cim_vol(smis_common, cim_vol_path): """ Find out the lsm.Pool.id of CIM_StorageVolume """ property_list = cim_pool_id_pros() cim_pools = smis_common.Associators( cim_vol_path, AssocClass='CIM_AllocatedFromStoragePool', ResultClass='CIM_StoragePool', PropertyList=property_list) if len(cim_pools) != 1: raise LsmError( ErrorNumber.PLUGIN_BUG, "pool_id_of_cim_vol(): Got unexpected count(%d) of cim_pool " % len(cim_pools) + "associated to cim_vol: %s, %s" % (cim_vol_path, cim_pools)) return pool_id_of_cim_pool(cim_pools[0]) libstoragemgmt-1.2.3/plugin/smispy/dmtf.py0000664000175000017500000001651312537737032015625 00000000000000# Copyright (C) 2011-2014 Red Hat, Inc. 
# This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: Gris Ge # This class handle DMTF CIM constants and convert to LSM type. from pywbem import Uint16 # CIM_StorageHardwareID['IDType'] ID_TYPE_OTHER = Uint16(1) ID_TYPE_WWPN = Uint16(2) ID_TYPE_ISCSI = Uint16(5) TGT_PORT_USAGE_FRONTEND_ONLY = Uint16(2) TGT_PORT_USAGE_UNRESTRICTED = Uint16(4) # CIM_FCPort['PortDiscriminator'] FC_PORT_PORT_DISCRIMINATOR_FCOE = Uint16(10) # CIM_NetworkPort['LinkTechnology'] NET_PORT_LINK_TECH_ETHERNET = Uint16(2) # CIM_iSCSIProtocolEndpoint['Role'] ISCSI_TGT_ROLE_TARGET = Uint16(3) # CIM_SCSIProtocolController['NameFormat'] SPC_NAME_FORMAT_ISCSI = Uint16(3) # CIM_IPProtocolEndpoint['IPv6AddressType'] IPV6_ADDR_TYPE_GUA = Uint16(6) # GUA: Global Unicast Address. # 2000::/3 IPV6_ADDR_TYPE_6TO4 = Uint16(7) # IPv6 to IPv4 transition # ::ffff:0:0/96 # ::ffff:0:0:0/96 # 64:ff9b::/96 # well-known prefix # 2002::/16 # 6to4 IPV6_ADDR_TYPE_ULA = Uint16(8) # ULA: Unique Local Address, aka Site Local Unicast. # fc00::/7 # CIM_GroupMaskingMappingService.CreateGroup('Type') MASK_GROUP_TYPE_INIT = Uint16(2) MASK_GROUP_TYPE_TGT = Uint16(3) MASK_GROUP_TYPE_DEV = Uint16(4) # CIM_GroupMaskingMappingCapabilities['SupportedDeviceGroupFeatures'] # Allowing empty DeviceMaskingGroup associated to SPC GMM_CAP_DEV_MG_ALLOW_EMPTY_W_SPC = Uint16(5) # CIM_GroupMaskingMappingCapabilities['SupportedAsynchronousActions'] # and 'SupportedSynchronousActions'. 
They are using the same value map. GMM_CAP_DELETE_SPC = Uint16(24) GMM_CAP_DELETE_GROUP = Uint16(20) # CIM_StorageConfigurationCapabilities['SupportedStorageElementTypes'] SCS_CAP_SUP_ST_VOLUME = Uint16(2) SCS_CAP_SUP_THIN_ST_VOLUME = Uint16(5) # CIM_StorageConfigurationCapabilities['SupportedAsynchronousActions'] # and also for 'SupportedSynchronousActions' SCS_CAP_VOLUME_CREATE = Uint16(5) SCS_CAP_VOLUME_DELETE = Uint16(6) SCS_CAP_VOLUME_MODIFY = Uint16(7) # DSP 1033 Profile Registration INTEROP_NAMESPACES = ['interop', 'root/interop', 'root/PG_Interop'] DEFAULT_NAMESPACE = 'interop' # DMTF CIM 2.37.0 experimental CIM_StoragePool['Usage'] POOL_USAGE_UNRESTRICTED = 2 POOL_USAGE_RESERVED_FOR_SYSTEM = 3 POOL_USAGE_DELTA = 4 POOL_USAGE_SPARE = 8 # DMTF CIM 2.29.1 CIM_StorageConfigurationCapabilities # ['SupportedStorageElementFeatures'] SUPPORT_VOL_CREATE = 3 SUPPORT_ELEMENT_EXPAND = 12 SUPPORT_ELEMENT_REDUCE = 13 # DMTF CIM 2.37.0 experimental CIM_StorageConfigurationCapabilities # ['SupportedStorageElementTypes'] ELEMENT_THICK_VOLUME = Uint16(2) ELEMENT_THIN_VOLUME = Uint16(5) # DMTF CIM 2.29.1 CIM_StorageConfigurationCapabilities # ['SupportedStoragePoolFeatures'] ST_POOL_FEATURE_INEXTS = 2 ST_POOL_FEATURE_SINGLE_INPOOL = 3 ST_POOL_FEATURE_MULTI_INPOOL = 4 # DMTF CIM 2.38.0+ CIM_StorageSetting['ThinProvisionedPoolType'] THINP_POOL_TYPE_ALLOCATED = Uint16(7) # DMTF Disk Type DISK_TYPE_UNKNOWN = 0 DISK_TYPE_OTHER = 1 DISK_TYPE_HDD = 2 DISK_TYPE_SSD = 3 DISK_TYPE_HYBRID = 4 # CIM_ManagedSystemElement['OperationalStatus'] OP_STATUS_UNKNOWN = 0 OP_STATUS_OTHER = 1 OP_STATUS_OK = 2 OP_STATUS_DEGRADED = 3 OP_STATUS_STRESSED = 4 OP_STATUS_PREDICTIVE_FAILURE = 5 OP_STATUS_ERROR = 6 OP_STATUS_NON_RECOVERABLE_ERROR = 7 OP_STATUS_STARTING = 8 OP_STATUS_STOPPING = 9 OP_STATUS_STOPPED = 10 OP_STATUS_IN_SERVICE = 11 OP_STATUS_NO_CONTACT = 12 OP_STATUS_LOST_COMMUNICATION = 13 OP_STATUS_ABORTED = 14 OP_STATUS_DORMANT = 15 OP_STATUS_SUPPORTING_ENTITY_IN_ERROR = 16 
OP_STATUS_COMPLETED = 17 OP_STATUS_POWER_MODE = 18 _OP_STATUS_STR_CONV = { OP_STATUS_UNKNOWN: "UNKNOWN", OP_STATUS_OTHER: "OTHER", OP_STATUS_OK: "OK", OP_STATUS_DEGRADED: "DEGRADED", OP_STATUS_STRESSED: "STRESSED", OP_STATUS_PREDICTIVE_FAILURE: "PREDICTIVE_FAILURE", OP_STATUS_ERROR: "ERROR", OP_STATUS_NON_RECOVERABLE_ERROR: "NON_RECOVERABLE_ERROR", OP_STATUS_STARTING: "STARTING", OP_STATUS_STOPPING: "STOPPING", OP_STATUS_STOPPED: "STOPPED", OP_STATUS_IN_SERVICE: "IN_SERVICE", OP_STATUS_NO_CONTACT: "NO_CONTACT", OP_STATUS_LOST_COMMUNICATION: "LOST_COMMUNICATION", OP_STATUS_ABORTED: "ABORTED", OP_STATUS_DORMANT: "DORMANT", OP_STATUS_SUPPORTING_ENTITY_IN_ERROR: "SUPPORTING_ENTITY_IN_ERROR", OP_STATUS_COMPLETED: "COMPLETED", OP_STATUS_POWER_MODE: "POWER_MODE", } def _op_status_to_str(dmtf_op_status): """ Just convert integer to string. NOT ALLOWING provide a list. Return emtpy string is not found. """ try: return _OP_STATUS_STR_CONV[dmtf_op_status] except KeyError: return '' def op_status_list_conv(conv_dict, dmtf_op_status_list, unknown_value, other_value): status = 0 status_info_list = [] for dmtf_op_status in dmtf_op_status_list: if dmtf_op_status in conv_dict.keys(): status |= conv_dict[dmtf_op_status] else: if dmtf_op_status in _OP_STATUS_STR_CONV.keys(): status |= other_value status_info_list.append(_op_status_to_str(dmtf_op_status)) continue if status == 0: status = unknown_value return status, " ".join(status_info_list) # CIM_ConcreteJob['JobState'] JOB_STATE_NEW = 2 JOB_STATE_STARTING = 3 JOB_STATE_RUNNING = 4 JOB_STATE_COMPLETED = 7 # CIM_Synchronized['SyncType'] also used by # CIM_ReplicationService.CreateElementReplica() 'SyncType' parameter. SYNC_TYPE_MIRROR = Uint16(6) SYNC_TYPE_SNAPSHOT = Uint16(7) SYNC_TYPE_CLONE = Uint16(8) # CIM_Synchronized['Mode'] also used by # CIM_ReplicationService.CreateElementReplica() 'Mode' parameter. 
REPLICA_MODE_SYNC = Uint16(2) REPLICA_MODE_ASYNC = Uint16(3) # CIM_StorageVolume['NameFormat'] VOL_NAME_FORMAT_NNA = 9 # CIM_StorageVolume['NameNamespace'] VOL_NAME_SPACE_VPD83_TYPE3 = 2 # CIM_ReplicationServiceCapabilities['SupportedAsynchronousActions'] # or CIM_ReplicationServiceCapabilities['SupportedSynchronousActions'] REPLICA_CAP_ACTION_CREATE_ELEMENT = 2 # CIM_ReplicationServiceCapabilities['SupportedReplicationTypes'] REPLICA_CAP_TYPE_SYNC_MIRROR_LOCAL = 2 REPLICA_CAP_TYPE_ASYNC_MIRROR_LOCAL = 3 REPLICA_CAP_TYPE_SYNC_SNAPSHOT_LOCAL = 6 REPLICA_CAP_TYPE_ASYNC_SNAPSHOT_LOCAL = 7 REPLICA_CAP_TYPE_SYNC_CLONE_LOCAL = 10 REPLICA_CAP_TYPE_ASYNC_CLONE_LOCAL = 11 # CIM_Synchronized['CopyState'] COPY_STATE_SYNC = Uint16(4) # CIM_StorageConfigurationCapabilities['SupportedCopyTypes'] ST_CONF_CAP_COPY_TYPE_ASYNC = Uint16(2) ST_CONF_CAP_COPY_TYPE_SYNC = Uint16(3) ST_CONF_CAP_COPY_TYPE_UNSYNC_ASSOC = Uint16(4) ST_CONF_CAP_COPY_TYPE_UNSYNC_UNASSOC = Uint16(5) # CIM_StorageSynchronized['SyncState'] ST_SYNC_STATE_SYNCHRONIZED = 6 # CIM_ControllerConfigurationService.ExposePaths(DeviceAccesses) CTRL_CONF_SRV_DA_RW = Uint16(2) VOL_OTHER_INFO_NAA_VPD83_TYPE3H = 'NAA;VPD83Type3' VOL_USAGE_SYS_RESERVED = Uint16(3) libstoragemgmt-1.2.3/plugin/smispy/smis_common.py0000664000175000017500000006272112537737032017220 00000000000000# Copyright (C) 2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . 
# # Author: Gris Ge # This file stores: # 1. Constants of SNIA SMI-S. # 2. Methods shared by smis_sys.py and etc: # * Job control # * Profile register # * WBEM actions: enumerate, associations, getinstance and etc. from pywbem import Uint16, CIMError import pywbem import traceback import os import datetime import time import sys import dmtf from lsm import LsmError, ErrorNumber, md5 from utils import (merge_list) def _profile_register_load(wbem_conn): """ Check CIM_RegisteredProfile in interop namespace. Return (profile_dict, root_blk_cim_rp) The 'profile_dict' is a dictionary like this: { # profile_name: max_version 'Array': 1.4, 'Block Service Profile': 1.4, } The 'root_blk_cim_rp' is the 'Array' profile of CIM_RegisteredProfile with highest version number. """ profile_dict = {} root_blk_cim_rp = None namespace_check_list = dmtf.INTEROP_NAMESPACES cim_rps = [] for namespace in namespace_check_list: try: cim_rps = wbem_conn.EnumerateInstances( 'CIM_RegisteredProfile', namespace=namespace, PropertyList=['RegisteredName', 'RegisteredVersion', 'RegisteredOrganization'], LocalOnly=False) except CIMError as e: if e[0] == pywbem.CIM_ERR_NOT_SUPPORTED or \ e[0] == pywbem.CIM_ERR_INVALID_NAMESPACE or \ e[0] == pywbem.CIM_ERR_INVALID_CLASS: pass else: raise if len(cim_rps) != 0: break if len(cim_rps) >= 1: for cim_rp in cim_rps: if cim_rp['RegisteredOrganization'] != \ SmisCommon.SNIA_REG_ORG_CODE: continue profile_name = cim_rp['RegisteredName'] profile_ver = cim_rp['RegisteredVersion'] profile_ver_num = _profile_spec_ver_to_num(profile_ver) if profile_name in profile_dict.keys(): exist_ver_num = _profile_spec_ver_to_num( profile_dict[profile_name]) if exist_ver_num >= profile_ver_num: continue if profile_name == SmisCommon.SNIA_BLK_ROOT_PROFILE: root_blk_cim_rp = cim_rp profile_dict[profile_name] = profile_ver else: raise LsmError( ErrorNumber.NO_SUPPORT, "Target SMI-S provider does not support DMTF DSP1033 profile " "register which is mandatory for LSM") return 
profile_dict, root_blk_cim_rp def _profile_check(profile_dict, profile_name, spec_ver, raise_error=False): """ Check whether we support certain profile at certain SNIA specification version. Profile spec version later or equal than require spec_ver will also be consider as found. Require profile_dict provided by SmisCommon.profile_register_load() Will raise LsmError(ErrorNumber.NO_SUPPORT, 'xxx') if raise_error is True when nothing found. """ request_ver_num = _profile_spec_ver_to_num(spec_ver) if profile_name not in profile_dict.keys(): if raise_error: raise LsmError( ErrorNumber.NO_SUPPORT, "SNIA SMI-S %s '%s' profile is not supported by " % (profile_name, spec_ver) + "target SMI-S provider") return False support_ver_num = _profile_spec_ver_to_num(profile_dict[profile_name]) if support_ver_num < request_ver_num: if raise_error: raise LsmError( ErrorNumber.NO_SUPPORT, "SNIA SMI-S %s '%s' profile is not supported by " % (profile_name, spec_ver) + "target SMI-S provider. Only version %s is supported" % profile_dict[profile_name]) else: return False return True def _profile_spec_ver_to_num(spec_ver_str): """ Convert version string stored in CIM_RegisteredProfile to a integer. Example: "1.5.1" -> 1,005,001 """ tmp_list = [0, 0, 0] tmp_list = spec_ver_str.split(".") if len(tmp_list) == 2: tmp_list.extend([0]) if len(tmp_list) == 3: return (int(tmp_list[0]) * 10 ** 6 + int(tmp_list[1]) * 10 ** 3 + int(tmp_list[2])) return None class SmisCommon(object): # Even many CIM_XXX_Service in DMTF shared the same return value # definition as SNIA do, but there is no DMTF standard motioned # InvokeMethod() should follow that list of return value. # We use SNIA definition here. # SNIA 1.6 rev4 Block book, BSP 5.5.3.12 Return Values section. 
SNIA_INVOKE_OK = 0 SNIA_INVOKE_NOT_SUPPORTED = 1 SNIA_INVOKE_FAILED = 4 SNIA_INVOKE_ASYNC = 4096 SNIA_BLK_ROOT_PROFILE = 'Array' SNIA_BLK_SRVS_PROFILE = 'Block Services' SNIA_DISK_LITE_PROFILE = 'Disk Drive Lite' SNIA_MULTI_SYS_PROFILE = 'Multiple Computer System' SNIA_MASK_PROFILE = 'Masking and Mapping' SNIA_GROUP_MASK_PROFILE = 'Group Masking and Mapping' SNIA_FC_TGT_PORT_PROFILE = 'FC Target Ports' SNIA_ISCSI_TGT_PORT_PROFILE = 'iSCSI Target Ports' SNIA_SPARE_DISK_PROFILE = 'Disk Sparing' SMIS_SPEC_VER_1_1 = '1.1' SMIS_SPEC_VER_1_4 = '1.4' SMIS_SPEC_VER_1_5 = '1.5' SMIS_SPEC_VER_1_6 = '1.6' SNIA_REG_ORG_CODE = Uint16(11) _MEGARAID_NAMESPACE = 'root/LsiMr13' _NETAPP_E_NAMESPACE = 'root/LsiArray13' _PRODUCT_MEGARAID = 'LSI MegaRAID' _PRODUCT_NETAPP_E = 'NetApp-E' JOB_RETRIEVE_NONE = 0 JOB_RETRIEVE_VOLUME = 1 JOB_RETRIEVE_VOLUME_CREATE = 2 IAAN_WBEM_HTTP_PORT = 5988 IAAN_WBEM_HTTPS_PORT = 5989 _INVOKE_MAX_LOOP_COUNT = 60 _INVOKE_CHECK_INTERVAL = 5 def __init__(self, url, username, password, namespace=dmtf.DEFAULT_NAMESPACE, no_ssl_verify=False, debug_path=None, system_list=None): self._wbem_conn = None self._profile_dict = {} self.root_blk_cim_rp = None # For root_cim_ self._vendor_product = None # For vendor workaround codes. self.system_list = system_list self._debug_path = debug_path if namespace is None: namespace = dmtf.DEFAULT_NAMESPACE self._wbem_conn = pywbem.WBEMConnection( url, (username, password), namespace) if no_ssl_verify: try: self._wbem_conn = pywbem.WBEMConnection( url, (username, password), namespace, no_verification=True) except TypeError: # pywbem is not holding fix from # https://bugzilla.redhat.com/show_bug.cgi?id=1039801 pass if debug_path is not None: self._wbem_conn.debug = True if namespace.lower() == SmisCommon._MEGARAID_NAMESPACE.lower(): # Skip profile register check on MegaRAID for better performance. # MegaRAID SMI-S profile support status will not change for a while. 
self._profile_dict = { # Provide a fake profile support status to pass the check. SmisCommon.SNIA_BLK_ROOT_PROFILE: SmisCommon.SMIS_SPEC_VER_1_4, SmisCommon.SNIA_BLK_SRVS_PROFILE: SmisCommon.SMIS_SPEC_VER_1_4, SmisCommon.SNIA_DISK_LITE_PROFILE: SmisCommon.SMIS_SPEC_VER_1_4, } self._vendor_product = SmisCommon._PRODUCT_MEGARAID else: (self._profile_dict, self.root_blk_cim_rp) = \ _profile_register_load(self._wbem_conn) if namespace.lower() == SmisCommon._NETAPP_E_NAMESPACE.lower(): self._vendor_product = SmisCommon._PRODUCT_NETAPP_E # NetApp-E indicates they support 1.0 version of FC/iSCSI target # But 1.0 does not define thoese profiles. Forcly change # support version to 1.4 self._profile_dict[SmisCommon.SNIA_FC_TGT_PORT_PROFILE] = \ SmisCommon.SMIS_SPEC_VER_1_4 self._profile_dict[SmisCommon.SNIA_ISCSI_TGT_PORT_PROFILE] = \ SmisCommon.SMIS_SPEC_VER_1_4 # NetApp-E indicates support of Mask and Mapping 1.2. But # SNIA website link for 1.2 broken. Change it to 1.4. self._profile_dict[SmisCommon.SNIA_MASK_PROFILE] = \ SmisCommon.SMIS_SPEC_VER_1_4 # Check 'Array' 1.4 support status. _profile_check( self._profile_dict, SmisCommon.SNIA_BLK_ROOT_PROFILE, SmisCommon.SMIS_SPEC_VER_1_4, raise_error=True) def profile_check(self, profile_name, spec_ver, raise_error=False): """ Usage: Check whether we support certain profile at certain SNIA specification version or later version. Will raise LsmError(ErrorNumber.NO_SUPPORT, 'xxx') if raise_error is True when nothing found. 
Parameter: profile_name # SmisCommon.SNIA_XXXX_PROFILE spec_ver # SmisCommon.SMIS_SPEC_VER_XXX raise_error # Raise LsmError if not found Returns: True or False """ return _profile_check( self._profile_dict, profile_name, spec_ver, raise_error) def _vendor_namespace(self): if self.root_blk_cim_rp: cim_syss_path = self._wbem_conn.AssociatorNames( self.root_blk_cim_rp.path, ResultClass='CIM_ComputerSystem', AssocClass='CIM_ElementConformsToProfile') if len(cim_syss_path) == 0: raise LsmError( ErrorNumber.NO_SUPPORT, "Target SMI-S provider does not support any " "CIM_ComputerSystem for SNIA SMI-S '%s' profile" % SmisCommon.SNIA_BLK_ROOT_PROFILE) return cim_syss_path[0].namespace else: raise LsmError( ErrorNumber.PLUGIN_BUG, "_vendor_namespace(): self.root_blk_cim_rp not set yet") def EnumerateInstances(self, ClassName, namespace=None, **params): if self._wbem_conn.default_namespace in dmtf.INTEROP_NAMESPACES: # We have to enumerate in vendor namespace self._wbem_conn.default_namespace = self._vendor_namespace() params['LocalOnly'] = False return self._wbem_conn.EnumerateInstances( ClassName, namespace, **params) def EnumerateInstanceNames(self, ClassName, namespace=None, **params): if self._wbem_conn.default_namespace in dmtf.INTEROP_NAMESPACES: # We have to enumerate in vendor namespace self._wbem_conn.default_namespace = self._vendor_namespace() params['LocalOnly'] = False return self._wbem_conn.EnumerateInstanceNames( ClassName, namespace, **params) def Associators(self, ObjectName, **params): return self._wbem_conn.Associators(ObjectName, **params) def AssociatorNames(self, ObjectName, **params): return self._wbem_conn.AssociatorNames(ObjectName, **params) def GetInstance(self, InstanceName, **params): params['LocalOnly'] = False return self._wbem_conn.GetInstance(InstanceName, **params) def DeleteInstance(self, InstanceName, **params): return self._wbem_conn.DeleteInstance(InstanceName, **params) def References(self, ObjectName, **params): return 
self._wbem_conn.References(ObjectName, **params) def is_megaraid(self): return self._vendor_product == SmisCommon._PRODUCT_MEGARAID def is_netappe(self): return self._vendor_product == SmisCommon._PRODUCT_NETAPP_E @staticmethod def cim_job_pros(): return ['InstanceID'] def cim_job_of_job_id(self, job_id, property_list=None): """ Return CIM_ConcreteJob for given job_id. """ if property_list is None: property_list = SmisCommon.cim_job_pros() else: property_list = merge_list( property_list, SmisCommon.cim_job_pros()) cim_jobs = self.EnumerateInstances( 'CIM_ConcreteJob', PropertyList=property_list) real_job_id = SmisCommon.parse_job_id(job_id)[0] for cim_job in cim_jobs: if md5(cim_job['InstanceID']) == real_job_id: return cim_job raise LsmError( ErrorNumber.NOT_FOUND_JOB, "Job %s not found" % job_id) @staticmethod def _job_id_of_cim_job(cim_job, retrieve_data, method_data): """ Return the MD5 has of CIM_ConcreteJob['InstanceID'] in conjunction with '@%s' % retrieve_data retrieve_data should be SmisCommon.JOB_RETRIEVE_NONE or SmisCommon.JOB_RETRIEVE_VOLUME or etc method_data is any string a method would like store for error handling by job_status(). """ return "%s@%d@%s" % ( md5(cim_job['InstanceID']), int(retrieve_data), str(method_data)) @staticmethod def parse_job_id(job_id): """ job_id is assembled by a md5 string, retrieve_data and method_data This method will split it and return (md5_str, retrieve_data, method_data) """ tmp_list = job_id.split('@', 3) md5_str = tmp_list[0] retrieve_data = SmisCommon.JOB_RETRIEVE_NONE method_data = None if len(tmp_list) == 3: retrieve_data = int(tmp_list[1]) method_data = tmp_list[2] return (md5_str, retrieve_data, method_data) def _dump_wbem_xml(self, file_prefix): """ When debugging issues with providers it's helpful to have the xml request/reply to give to provider developers. 
""" try: if self._debug_path is not None: if not os.path.exists(self._debug_path): os.makedirs(self._debug_path) if os.path.isdir(self._debug_path): debug_fn = "%s_%s" % ( file_prefix, datetime.datetime.now().isoformat()) debug_full = os.path.join( self._debug_path, debug_fn) # Dump the request & reply to a file with open(debug_full, 'w') as d: d.write("REQUEST:\n%s\n\nREPLY:\n%s\n" % (self._wbem_conn.last_request, self._wbem_conn.last_reply)) except Exception: # Lets not bother to try and report that we couldn't log the debug # data when we are most likely already in a bad spot pass def invoke_method(self, cmd, cim_path, in_params, out_handler=None, error_handler=None, retrieve_data=None, method_data=None): """ cmd A string of command, example: 'CreateOrModifyElementFromStoragePool' cim_path the CIMInstanceName, example: CIM_StorageConfigurationService.path in_params A dictionary of input parameter, example: {'ElementName': volume_name, 'ElementType': dmtf_element_type, 'InPool': cim_pool_path, 'Size': pywbem.Uint64(size_bytes)} out_handler A reference to a method to parse output, example: self._new_vol_from_name error_handler A reference to a method to handle all exceptions. retrieve_data SmisCommon.JOB_RETRIEVE_XXX, it will be used only when a ASYNC job has been created. method_data A string which will be stored in job_id, it could be used by job_status() to do error checking. 
""" if retrieve_data is None: retrieve_data = SmisCommon.JOB_RETRIEVE_NONE try: (rc, out) = self._wbem_conn.InvokeMethod( cmd, cim_path, **in_params) # Check to see if operation is done if rc == SmisCommon.SNIA_INVOKE_OK: if out_handler is None: return None, None else: return None, out_handler(out) elif rc == SmisCommon.SNIA_INVOKE_ASYNC: # We have an async operation job_id = SmisCommon._job_id_of_cim_job( out['Job'], retrieve_data, method_data) return job_id, None elif rc == SmisCommon.SNIA_INVOKE_NOT_SUPPORTED: raise LsmError( ErrorNumber.NO_SUPPORT, 'SMI-S error code indicates operation not supported') else: self._dump_wbem_xml(cmd) raise LsmError(ErrorNumber.PLUGIN_BUG, "Error: %s rc= %s" % (cmd, str(rc))) except Exception: exc_info = sys.exc_info() # Make sure to save off current exception as we could cause # another when trying to dump debug data. self._dump_wbem_xml(cmd) if error_handler is not None: error_handler(self, method_data, exc_info) else: raise def invoke_method_wait(self, cmd, cim_path, in_params, out_key=None, expect_class=None, flag_out_array=False): """ InvokeMethod and wait it until done. Return a CIMInstanceName from out[out_key] or from cim_job: CIM_ConcreteJob | | CIM_AffectedJobElement v CIMInstanceName # expect_class If flag_out_array is True, return the first element of out[out_key]. 
""" (rc, out) = self._wbem_conn.InvokeMethod(cmd, cim_path, **in_params) try: if rc == SmisCommon.SNIA_INVOKE_OK: if out_key is None: return None if out_key in out: if flag_out_array: if len(out[out_key]) == 1: return out[out_key][0] else: raise LsmError( ErrorNumber.PLUGIN_BUG, "invoke_method_wait(), output contains %d " % len(out[out_key]) + "elements: %s" % out[out_key]) return out[out_key] else: raise LsmError(ErrorNumber.PLUGIN_BUG, "invoke_method_wait(), %s not exist " "in out %s" % (out_key, out.items())) elif rc == SmisCommon.SNIA_INVOKE_ASYNC: cim_job_path = out['Job'] loop_counter = 0 job_pros = ['JobState', 'ErrorDescription', 'OperationalStatus'] cim_xxxs_path = [] while(loop_counter <= SmisCommon._INVOKE_MAX_LOOP_COUNT): cim_job = self.GetInstance(cim_job_path, PropertyList=job_pros) job_state = cim_job['JobState'] if job_state in (dmtf.JOB_STATE_NEW, dmtf.JOB_STATE_STARTING, dmtf.JOB_STATE_RUNNING): loop_counter += 1 time.sleep(SmisCommon._INVOKE_CHECK_INTERVAL) continue elif job_state == dmtf.JOB_STATE_COMPLETED: if not SmisCommon.cim_job_completed_ok(cim_job): raise LsmError( ErrorNumber.PLUGIN_BUG, str(cim_job['ErrorDescription'])) if expect_class is None: return None cim_xxxs_path = self.AssociatorNames( cim_job.path, AssocClass='CIM_AffectedJobElement', ResultClass=expect_class) break else: raise LsmError( ErrorNumber.PLUGIN_BUG, "invoke_method_wait(): Got unknown job state " "%d: %s" % (job_state, cim_job.items())) if loop_counter > SmisCommon._INVOKE_MAX_LOOP_COUNT: raise LsmError( ErrorNumber.TIMEOUT, "The job generated by %s() failed to finish in %ds" % (cmd, SmisCommon._INVOKE_CHECK_INTERVAL * SmisCommon._INVOKE_MAX_LOOP_COUNT)) if len(cim_xxxs_path) == 1: return cim_xxxs_path[0] else: raise LsmError( ErrorNumber.PLUGIN_BUG, "invoke_method_wait(): got unexpected(not 1) " "return from CIM_AffectedJobElement: " "%s, out: %s, job: %s" % (cim_xxxs_path, out.items(), cim_job.items())) else: self._dump_wbem_xml(cmd) raise LsmError( 
ErrorNumber.PLUGIN_BUG, "invoke_method_wait(): Got unexpected rc code " "%d, out: %s" % (rc, out.items())) except Exception: exc_info = sys.exc_info() # Make sure to save off current exception as we could cause # another when trying to dump debug data. self._dump_wbem_xml(cmd) raise exc_info[0], exc_info[1], exc_info[2] def _cim_srv_of_sys_id(self, srv_name, sys_id, raise_error): property_list = ['SystemName'] try: cim_srvs = self.EnumerateInstances( srv_name, PropertyList=property_list) for cim_srv in cim_srvs: if cim_srv['SystemName'] == sys_id: return cim_srv except CIMError: if raise_error: raise else: return None if raise_error: raise LsmError( ErrorNumber.NO_SUPPORT, "Cannot find any '%s' for requested system ID" % srv_name) return None def cim_scs_of_sys_id(self, sys_id, raise_error=True): """ Return a CIMInstance of CIM_StorageConfigurationService for given system id. Using 'SystemName' property as system id of a service which is defined by DMTF CIM_Service. """ return self._cim_srv_of_sys_id( 'CIM_StorageConfigurationService', sys_id, raise_error) def cim_rs_of_sys_id(self, sys_id, raise_error=True): """ Return a CIMInstance of CIM_ReplicationService for given system id. Using 'SystemName' property as system id of a service which is defined by DMTF CIM_Service. """ return self._cim_srv_of_sys_id( 'CIM_ReplicationService', sys_id, raise_error) def cim_gmms_of_sys_id(self, sys_id, raise_error=True): """ Return a CIMInstance of CIM_GroupMaskingMappingService for given system id. Using 'SystemName' property as system id of a service which is defined by DMTF CIM_Service. """ return self._cim_srv_of_sys_id( 'CIM_GroupMaskingMappingService', sys_id, raise_error) def cim_ccs_of_sys_id(self, sys_id, raise_error=True): """ Return a CIMInstance of CIM_ControllerConfigurationService for given system id. Using 'SystemName' property as system id of a service which is defined by DMTF CIM_Service. 
""" return self._cim_srv_of_sys_id( 'CIM_ControllerConfigurationService', sys_id, raise_error) def cim_hwms_of_sys_id(self, sys_id, raise_error=True): """ Return a CIMInstance of CIM_StorageHardwareIDManagementService for given system id. Using 'SystemName' property as system id of a service which is defined by DMTF CIM_Service. """ return self._cim_srv_of_sys_id( 'CIM_StorageHardwareIDManagementService', sys_id, raise_error) @staticmethod def cim_job_completed_ok(status): """ Given a concrete job instance, check the operational status. This is a little convoluted as different SMI-S proxies return the values in different positions in list :-) """ rc = False op = status['OperationalStatus'] if (len(op) > 1 and ((op[0] == dmtf.OP_STATUS_OK and op[1] == dmtf.OP_STATUS_COMPLETED) or (op[0] == dmtf.OP_STATUS_COMPLETED and op[1] == dmtf.OP_STATUS_OK))): rc = True return rc libstoragemgmt-1.2.3/plugin/smispy/__init__.py0000664000175000017500000000000012537546123016411 00000000000000libstoragemgmt-1.2.3/plugin/smispy/utils.py0000664000175000017500000000630212537737032016026 00000000000000# Copyright (C) 2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . 
# # Author: Gris Ge import traceback from lsm import (LsmError, ErrorNumber, error) from pywbem import (CIMError, CIMInstanceName) import pywbem import json def merge_list(list_a, list_b): return list(set(list_a + list_b)) def handle_cim_errors(method): def cim_wrapper(*args, **kwargs): try: return method(*args, **kwargs) except LsmError as lsm: raise except CIMError as ce: error_code, desc = ce if error_code == 0: if 'Socket error' in desc: if 'Errno 111' in desc: raise LsmError(ErrorNumber.NETWORK_CONNREFUSED, 'Connection refused') if 'Errno 113' in desc: raise LsmError(ErrorNumber.NETWORK_HOSTDOWN, 'Host is down') elif 'SSL error' in desc: raise LsmError(ErrorNumber.TRANSPORT_COMMUNICATION, desc) elif 'The web server returned a bad status line': raise LsmError(ErrorNumber.TRANSPORT_COMMUNICATION, desc) elif 'HTTP error' in desc: raise LsmError(ErrorNumber.TRANSPORT_COMMUNICATION, desc) raise LsmError(ErrorNumber.PLUGIN_BUG, desc) except pywbem.cim_http.AuthError as ae: raise LsmError(ErrorNumber.PLUGIN_AUTH_FAILED, "Unauthorized user") except pywbem.cim_http.Error as te: raise LsmError(ErrorNumber.NETWORK_ERROR, str(te)) except Exception as e: error("Unexpected exception:\n" + traceback.format_exc()) raise LsmError(ErrorNumber.PLUGIN_BUG, str(e), traceback.format_exc()) return cim_wrapper def hex_string_format(hex_str, length, every): hex_str = hex_str.lower() return ':'.join(hex_str[i:i + every] for i in range(0, length, every)) def cim_path_to_path_str(cim_path): """ Convert CIMInstanceName to a string which could save in plugin_data """ return json.dumps({ 'classname': cim_path.classname, 'keybindings': dict(cim_path.keybindings), 'host': cim_path.host, 'namespace': cim_path.namespace, }) def path_str_to_cim_path(path_str): """ Convert a string into CIMInstanceName. 
""" path_dict = json.loads(path_str) return CIMInstanceName(**path_dict) libstoragemgmt-1.2.3/plugin/smispy/smis_vol.py0000664000175000017500000001715112537737032016525 00000000000000## Copyright (C) 2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: Gris Ge """ This module intends to provide independent methods related to lsm.Volume and CIM_StorageVolume. """ import re import sys from lsm import md5, Volume, LsmError, ErrorNumber from lsm.plugin.smispy.utils import ( merge_list, cim_path_to_path_str, path_str_to_cim_path) from lsm.plugin.smispy import dmtf def cim_vol_id_pros(): """ Return the property of CIM_StorageVolume required to generate lsm.Volume.id """ return ['SystemName', 'DeviceID'] def vol_id_of_cim_vol(cim_vol): """ Get lsm.Volume.id from CIM_StorageVolume['DeviceID'] and ['SystemName'] """ if 'SystemName' not in cim_vol or 'DeviceID' not in cim_vol: raise LsmError( ErrorNumber.PLUGIN_BUG, "vol_id_of_cim_vol(): Got cim_vol with no " "SystemName or DeviceID property: %s, %s" % (cim_vol.path, cim_vol.items())) return md5("%s%s" % (cim_vol['SystemName'], cim_vol['DeviceID'])) def cim_vol_pros(): """ Return the PropertyList required for creating new lsm.Volume. 
""" props = ['ElementName', 'NameFormat', 'NameNamespace', 'BlockSize', 'NumberOfBlocks', 'Name', 'OtherIdentifyingInfo', 'IdentifyingDescriptions', 'Usage', 'OtherNameFormat', 'OtherNameNamespace'] props.extend(cim_vol_id_pros()) return props def cim_vol_of_cim_pool_path(smis_common, cim_pool_path, property_list=None): """ Use this association to get a list of CIM_StorageVolume: CIM_StoragePool | | CIM_AllocatedFromStoragePool | v CIM_StorageVolume CIM_StorageVolume['Usage'] == dmtf.VOL_USAGE_SYS_RESERVED will be filtered out. Return a list of CIM_StorageVolume. """ if property_list is None: property_list = ['Usage'] else: property_list = merge_list(property_list, ['Usage']) cim_vols = smis_common.Associators( cim_pool_path, AssocClass='CIM_AllocatedFromStoragePool', ResultClass='CIM_StorageVolume', PropertyList=property_list) needed_cim_vols = [] for cim_vol in cim_vols: if 'Usage' not in cim_vol or \ cim_vol['Usage'] != dmtf.VOL_USAGE_SYS_RESERVED: needed_cim_vols.append(cim_vol) return needed_cim_vols def _vpd83_in_cim_vol_name(cim_vol): """ We require NAA Type 3 VPD83 address: Only this is allowed when storing VPD83 in cim_vol["Name"]: * NameFormat = NAA(9), NameNamespace = VPD83Type3(2) """ if not ('NameFormat' in cim_vol and 'NameNamespace' in cim_vol and 'Name' in cim_vol): return None name_format = cim_vol['NameFormat'] name_space = cim_vol['NameNamespace'] name = cim_vol['Name'] if not (name_format and name_space and name): return None if name_format == dmtf.VOL_NAME_FORMAT_NNA and \ name_space == dmtf.VOL_NAME_SPACE_VPD83_TYPE3: return name def _vpd83_in_cim_vol_otherinfo(cim_vol): """ IdentifyingDescriptions[] shall contain "NAA;VPD83Type3". 
Will return the vpd_83 value if found """ if not ("IdentifyingDescriptions" in cim_vol and "OtherIdentifyingInfo" in cim_vol): return None id_des = cim_vol["IdentifyingDescriptions"] other_info = cim_vol["OtherIdentifyingInfo"] if not (isinstance(cim_vol["IdentifyingDescriptions"], list) and isinstance(cim_vol["OtherIdentifyingInfo"], list)): return None index = 0 len_id_des = len(id_des) len_other_info = len(other_info) while index < min(len_id_des, len_other_info): if dmtf.VOL_OTHER_INFO_NAA_VPD83_TYPE3H == id_des[index]: return other_info[index] index += 1 return None def _vpd83_netapp(cim_vol): """ Workaround for NetApp, they use OtherNameNamespace and OtherNameFormat. """ if 'OtherNameFormat' in cim_vol and \ cim_vol['OtherNameFormat'] == 'NAA' and \ 'OtherNameNamespace' in cim_vol and \ cim_vol['OtherNameNamespace'] == 'VPD83Type3' and \ 'OtherIdentifyingInfo' in cim_vol and \ isinstance(cim_vol["OtherIdentifyingInfo"], list) and \ len(cim_vol['OtherIdentifyingInfo']) == 1: return cim_vol['OtherIdentifyingInfo'][0] def _vpd83_of_cim_vol(cim_vol): """ Extract VPD83 NAA string from CIMInstanceName and convert to LSM format. """ vpd_83 = _vpd83_in_cim_vol_name(cim_vol) if vpd_83 is None: vpd_83 = _vpd83_in_cim_vol_otherinfo(cim_vol) if vpd_83 is None: vpd_83 = _vpd83_netapp(cim_vol) if vpd_83: vpd_83 = vpd_83.lower() if vpd_83 and Volume.vpd83_verify(vpd_83): return vpd_83 else: return '' def cim_vol_to_lsm_vol(cim_vol, pool_id, sys_id): """ Takes a CIMInstance that represents a volume and returns a lsm Volume """ # This is optional (User friendly name) if 'ElementName' in cim_vol: user_name = cim_vol["ElementName"] else: #Better fallback value? 
user_name = cim_vol['DeviceID'] vpd_83 = _vpd83_of_cim_vol(cim_vol) admin_state = Volume.ADMIN_STATE_ENABLED plugin_data = cim_path_to_path_str(cim_vol.path) return Volume( vol_id_of_cim_vol(cim_vol), user_name, vpd_83, cim_vol["BlockSize"], cim_vol["NumberOfBlocks"], admin_state, sys_id, pool_id, plugin_data) def lsm_vol_to_cim_vol_path(smis_common, lsm_vol): """ Convert lsm.Volume to CIMInstanceName of CIM_StorageVolume using lsm.Volume.plugin_data """ if not lsm_vol.plugin_data: raise LsmError( ErrorNumber.PLUGIN_BUG, "Got lsm.Volume instance with empty plugin_data") if smis_common.system_list and \ lsm_vol.system_id not in smis_common.system_list: raise LsmError( ErrorNumber.NOT_FOUND_SYSTEM, "System filtered in URI") return path_str_to_cim_path(lsm_vol.plugin_data) def volume_name_exists(smis_common, volume_name): """ Try to minimize time to search. :param volume_name: Volume ElementName :return: True if volume exists with 'name', else False """ all_cim_vols = smis_common.EnumerateInstances( 'CIM_StorageVolume', PropertyList=['ElementName']) for exist_cim_vol in all_cim_vols: if volume_name == exist_cim_vol['ElementName']: return True return False def volume_create_error_handler(smis_common, method_data, exec_info=None): """ When we got CIMError, we check whether we got a duplicate volume name. The method_data is the requested volume name. """ if volume_name_exists(smis_common, method_data): raise LsmError(ErrorNumber.NAME_CONFLICT, "Volume with name '%s' already exists!" % method_data) if exec_info is None: (error_type, error_msg, error_trace) = sys.exc_info() else: (error_type, error_msg, error_trace) = exec_info raise error_type, error_msg, error_trace libstoragemgmt-1.2.3/plugin/smispy/smis_ag.py0000664000175000017500000002252612537737032016316 00000000000000## Copyright (C) 2014 Red Hat, Inc. 
# This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: Gris Ge """ This module intend to provide independent methods for lsm.AccessGroup and volume masking/unmasking. """ from pywbem import CIMError, CIM_ERR_NOT_FOUND from lsm import AccessGroup, md5, LsmError, ErrorNumber from lsm.plugin.smispy.smis_common import SmisCommon from lsm.plugin.smispy import dmtf from lsm.plugin.smispy.utils import cim_path_to_path_str, path_str_to_cim_path _CIM_INIT_PROS = ['StorageID', 'IDType'] def _init_id_and_type_of(cim_inits): """ Retrieve AccessGroup.init_ids and AccessGroup.init_type from a list of CIM_StorageHardwareID. """ init_ids = [] init_type = AccessGroup.INIT_TYPE_UNKNOWN init_types = [] for cim_init in cim_inits: if cim_init['IDType'] == dmtf.ID_TYPE_WWPN: init_ids.append(init_id_of_cim_init(cim_init)) init_types.append(AccessGroup.INIT_TYPE_WWPN) if cim_init['IDType'] == dmtf.ID_TYPE_ISCSI: init_ids.append(init_id_of_cim_init(cim_init)) init_types.append(AccessGroup.INIT_TYPE_ISCSI_IQN) # Skip if not a iscsi initiator IQN or WWPN. 
continue init_type_dict = {} for cur_init_type in init_types: init_type_dict[cur_init_type] = 1 if len(init_type_dict) == 1: init_type = init_types[0] elif len(init_type_dict) == 2: init_type = AccessGroup.INIT_TYPE_ISCSI_WWPN_MIXED return (init_ids, init_type) def cim_spc_pros(): """ Return the property of CIM_SCSIProtocolController required to generate lsm.AccessGroup 'EMCAdapterRole' is for EMC VNX only. """ return ['DeviceID', 'ElementName', 'StorageID', 'EMCAdapterRole', 'SystemName'] def cim_init_mg_pros(): """ Return the property of CIM_InitiatorMaskingGroup required to generate lsm.AccessGroup """ return ['ElementName', 'InstanceID'] def cim_init_of_cim_spc_path(smis_common, cim_spc_path): """ Return a list of CIM_StorageHardwareID associated to cim_spc. Only contain ['StorageID', 'IDType'] property. Two ways to get StorageHardwareID from SCSIProtocolController: * Method A (defined in SNIA SMIS 1.6): CIM_SCSIProtocolController | | CIM_AssociatedPrivilege v CIM_StorageHardwareID * Method B (defined in SNIA SMIS 1.3, 1.4, 1.5 and 1.6): CIM_SCSIProtocolController | | CIM_AuthorizedTarget v CIM_AuthorizedPrivilege | | CIM_AuthorizedSubject v CIM_StorageHardwareID """ cim_inits = [] if smis_common.profile_check(SmisCommon.SNIA_MASK_PROFILE, SmisCommon.SMIS_SPEC_VER_1_6, raise_error=False): try: cim_inits = smis_common.Associators( cim_spc_path, AssocClass='CIM_AssociatedPrivilege', ResultClass='CIM_StorageHardwareID', PropertyList=_CIM_INIT_PROS) except CIMError as cim_error: if cim_error[0] == CIM_ERR_NOT_FOUND: pass else: raise if len(cim_inits) == 0: cim_aps_path = smis_common.AssociatorNames( cim_spc_path, AssocClass='CIM_AuthorizedTarget', ResultClass='CIM_AuthorizedPrivilege') for cim_ap_path in cim_aps_path: cim_inits.extend(smis_common.Associators( cim_ap_path, AssocClass='CIM_AuthorizedSubject', ResultClass='CIM_StorageHardwareID', PropertyList=_CIM_INIT_PROS)) return cim_inits def cim_spc_to_lsm_ag(smis_common, cim_spc, system_id): """ Convert 
CIM_SCSIProtocolController to lsm.AccessGroup """ ag_id = md5(cim_spc['DeviceID']) ag_name = cim_spc['ElementName'] cim_inits = cim_init_of_cim_spc_path(smis_common, cim_spc.path) (init_ids, init_type) = _init_id_and_type_of(cim_inits) plugin_data = cim_path_to_path_str(cim_spc.path) return AccessGroup( ag_id, ag_name, init_ids, init_type, system_id, plugin_data) def cim_init_of_cim_init_mg_path(smis_common, cim_init_mg_path): """ Use this association to get a list of CIM_StorageHardwareID: CIM_InitiatorMaskingGroup | | CIM_MemberOfCollection v CIM_StorageHardwareID Only contain ['StorageID', 'IDType'] property. """ return smis_common.Associators( cim_init_mg_path, AssocClass='CIM_MemberOfCollection', ResultClass='CIM_StorageHardwareID', PropertyList=_CIM_INIT_PROS) def cim_init_mg_to_lsm_ag(smis_common, cim_init_mg, system_id): """ Convert CIM_InitiatorMaskingGroup to lsm.AccessGroup """ ag_name = cim_init_mg['ElementName'] ag_id = md5(cim_init_mg['InstanceID']) cim_inits = cim_init_of_cim_init_mg_path(smis_common, cim_init_mg.path) (init_ids, init_type) = _init_id_and_type_of(cim_inits) plugin_data = cim_path_to_path_str(cim_init_mg.path) return AccessGroup( ag_id, ag_name, init_ids, init_type, system_id, plugin_data) def lsm_ag_to_cim_spc_path(smis_common, lsm_ag): """ Convert lsm.AccessGroup to CIMInstanceName of CIM_SCSIProtocolController using lsm.AccessGroup.plugin_data. This method does not check whether plugin_data is cim_spc or cim_init_mg, caller should make sure that. """ if not lsm_ag.plugin_data: raise LsmError( ErrorNumber.PLUGIN_BUG, "Got lsm.AccessGroup instance with empty plugin_data") if smis_common.system_list and \ lsm_ag.system_id not in smis_common.system_list: raise LsmError( ErrorNumber.NOT_FOUND_SYSTEM, "System filtered in URI") return path_str_to_cim_path(lsm_ag.plugin_data) def lsm_ag_to_cim_init_mg_path(smis_common, lsm_ag): """ Convert lsm.AccessGroup to CIMInstanceName of CIM_InitiatorMaskingGroup using lsm.AccessGroup.plugin_data. 
This method does not check whether plugin_data is cim_spc or cim_init_mg, caller should make sure that. """ return lsm_ag_to_cim_spc_path(smis_common, lsm_ag) def init_id_of_cim_init(cim_init): """ Return CIM_StorageHardwareID['StorageID'] """ if 'StorageID' in cim_init: return cim_init['StorageID'] raise LsmError( ErrorNumber.PLUGIN_BUG, "init_id_of_cim_init() got cim_init without 'StorageID' %s: %s" % (cim_init.path, cim_init.items())) def lsm_init_id_to_snia(lsm_init_id): """ If lsm_init_id is a WWPN, convert it to SNIA format: [0-9A-F]{16} If not, return original directly. """ val, init_type, init_id = AccessGroup.initiator_id_verify(lsm_init_id) if val and init_type == AccessGroup.INIT_TYPE_WWPN: return lsm_init_id.replace(':', '').upper() return lsm_init_id def cim_init_path_check_or_create(smis_common, system_id, init_id, init_type): """ Check whether CIM_StorageHardwareID exists, if not, create new one. """ cim_inits = smis_common.EnumerateInstances( 'CIM_StorageHardwareID', PropertyList=_CIM_INIT_PROS) if len(cim_inits): for cim_init in cim_inits: if init_id_of_cim_init(cim_init) == init_id: return cim_init.path # Create new one dmtf_id_type = None if init_type == AccessGroup.INIT_TYPE_WWPN: dmtf_id_type = dmtf.ID_TYPE_WWPN elif init_type == AccessGroup.INIT_TYPE_ISCSI_IQN: dmtf_id_type = dmtf.ID_TYPE_ISCSI else: raise LsmError( ErrorNumber.PLUGIN_BUG, "cim_init_path_check_or_create(): Got invalid init_type: %d" % init_type) cim_hwms = smis_common.cim_hwms_of_sys_id(system_id) in_params = { 'StorageID': init_id, 'IDType': dmtf_id_type, } return smis_common.invoke_method_wait( 'CreateStorageHardwareID', cim_hwms.path, in_params, out_key='HardwareID', expect_class='CIM_StorageHardwareID') def cim_vols_masked_to_cim_spc_path(smis_common, cim_spc_path, property_list=None): """ Use this association to find out masked volume for certain cim_spc: CIM_SCSIProtocolController | | CIM_ProtocolControllerForUnit v CIM_StorageVolume Return a list of CIMInstance """ if 
property_list is None: property_list = [] return smis_common.Associators( cim_spc_path, AssocClass='CIM_ProtocolControllerForUnit', ResultClass='CIM_StorageVolume', PropertyList=property_list) libstoragemgmt-1.2.3/plugin/smispy/smis_disk.py0000664000175000017500000002025212537737032016653 00000000000000## Copyright (C) 2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: Gris Ge from lsm import Disk, md5, LsmError, ErrorNumber import dmtf from utils import merge_list from pywbem import CIM_ERR_NOT_FOUND, CIM_ERR_INVALID_PARAMETER from lsm.plugin.smispy.smis_common import SmisCommon _LSM_DISK_OP_STATUS_CONV = { dmtf.OP_STATUS_UNKNOWN: Disk.STATUS_UNKNOWN, dmtf.OP_STATUS_OK: Disk.STATUS_OK, dmtf.OP_STATUS_PREDICTIVE_FAILURE: Disk.STATUS_PREDICTIVE_FAILURE, dmtf.OP_STATUS_ERROR: Disk.STATUS_ERROR, dmtf.OP_STATUS_NON_RECOVERABLE_ERROR: Disk.STATUS_ERROR, dmtf.OP_STATUS_STARTING: Disk.STATUS_STARTING, dmtf.OP_STATUS_STOPPING: Disk.STATUS_STOPPING, dmtf.OP_STATUS_STOPPED: Disk.STATUS_STOPPED, } def _disk_status_of_cim_disk(cim_disk): """ Convert CIM_DiskDrive['OperationalStatus'] to LSM Only return status, no status_info """ if 'OperationalStatus' not in cim_disk: return Disk.STATUS_UNKNOWN return dmtf.op_status_list_conv( _LSM_DISK_OP_STATUS_CONV, cim_disk['OperationalStatus'], Disk.STATUS_UNKNOWN, Disk.STATUS_OTHER)[0] _DMTF_DISK_TYPE_2_LSM = { dmtf.DISK_TYPE_UNKNOWN: Disk.TYPE_UNKNOWN, 
dmtf.DISK_TYPE_OTHER: Disk.TYPE_OTHER, dmtf.DISK_TYPE_HDD: Disk.TYPE_HDD, dmtf.DISK_TYPE_SSD: Disk.TYPE_SSD, dmtf.DISK_TYPE_HYBRID: Disk.TYPE_HYBRID, } def _dmtf_disk_type_2_lsm_disk_type(dmtf_disk_type): if dmtf_disk_type in _DMTF_DISK_TYPE_2_LSM.keys(): return _DMTF_DISK_TYPE_2_LSM[dmtf_disk_type] else: return Disk.TYPE_UNKNOWN def _disk_id_of_cim_disk(cim_disk): if 'SystemName' not in cim_disk or \ 'DeviceID' not in cim_disk: raise LsmError( ErrorNumber.PLUGIN_BUG, "_disk_id_of_cim_disk(): Got cim_disk with no " "SystemName or DeviceID property: %s, %s" % (cim_disk.path, cim_disk.items())) return md5("%s%s" % (cim_disk['SystemName'], cim_disk['DeviceID'])) def cim_disk_pros(): """ Return all CIM_DiskDrive Properties needed to create a Disk object. The 'Type' and 'MediaType' is only for MegaRAID. The 'EMCInUse' is only for EMC. """ return ['OperationalStatus', 'Name', 'SystemName', 'Caption', 'InterconnectType', 'DiskType', 'DeviceID', 'Type', 'MediaType', 'EMCInUse'] def sys_id_of_cim_disk(cim_disk): if 'SystemName' not in cim_disk: raise LsmError( ErrorNumber.PLUGIN_BUG, "sys_id_of_cim_disk(): Got cim_disk with no " "SystemName property: %s, %s" % (cim_disk.path, cim_disk.items())) return cim_disk['SystemName'] def _pri_cim_ext_of_cim_disk(smis_common, cim_disk_path, property_list=None): """ Usage: Find out the Primordial CIM_StorageExtent of CIM_DiskDrive In SNIA SMI-S 1.4 rev.6 Block book, section 11.1.1 'Base Model' quote: A disk drive is modeled as a single MediaAccessDevice (DiskDrive) That shall be linked to a single StorageExtent (representing the storage in the drive) by a MediaPresent association. The StorageExtent class represents the storage of the drive and contains its size. 
Parameter: cim_disk_path # CIM_InstanceName of CIM_DiskDrive property_list # a List of properties needed on returned # CIM_StorageExtent Returns: cim_pri_ext # The CIM_Instance of Primordial CIM_StorageExtent Exceptions: LsmError ErrorNumber.LSM_PLUGIN_BUG # Failed to find out pri cim_ext """ if property_list is None: property_list = ['Primordial'] else: property_list = merge_list(property_list, ['Primordial']) cim_exts = smis_common.Associators( cim_disk_path, AssocClass='CIM_MediaPresent', ResultClass='CIM_StorageExtent', PropertyList=property_list) cim_exts = [p for p in cim_exts if p["Primordial"]] if len(cim_exts) == 1: # As SNIA commanded, only _ONE_ Primordial CIM_StorageExtent for # each CIM_DiskDrive return cim_exts[0] else: raise LsmError(ErrorNumber.PLUGIN_BUG, "_pri_cim_ext_of_cim_disk(): " "Got unexpected count of Primordial " + "CIM_StorageExtent for CIM_DiskDrive: %s, %s " % (cim_disk_path, cim_exts)) # LSIESG_DiskDrive['MediaType'] # Value was retrieved from MOF file of MegaRAID SMI-S provider. _MEGARAID_DISK_MEDIA_TYPE_SSD = 1 _MEGARAID_DISK_MEDIA_TYPE_SSD_FLASH = 2 # LSIESG_DiskDrive['Type'] # Value was retrieved from LSI engineer with content of LGPL2.1+ license. _MEGARAID_DISK_TYPE_SCSI = 1 _MEGARAID_DISK_TYPE_SAS = 2 _MEGARAID_DISK_TYPE_SATA = 3 _MEGARAID_DISK_TYPE_FC = 4 def _disk_type_megaraid(cim_disk): if cim_disk['MediaType'] == _MEGARAID_DISK_MEDIA_TYPE_SSD or \ cim_disk['MediaType'] == _MEGARAID_DISK_MEDIA_TYPE_SSD_FLASH: return Disk.TYPE_SSD else: if int(cim_disk['Type']) == _MEGARAID_DISK_TYPE_SCSI: return Disk.TYPE_SCSI elif int(cim_disk['Type']) == _MEGARAID_DISK_TYPE_SAS: return Disk.TYPE_SAS elif int(cim_disk['Type']) == _MEGARAID_DISK_TYPE_SATA: return Disk.TYPE_SATA elif int(cim_disk['Type']) == _MEGARAID_DISK_TYPE_FC: return Disk.TYPE_FC return Disk.TYPE_UNKNOWN def cim_disk_to_lsm_disk(smis_common, cim_disk): """ Convert CIM_DiskDrive to lsm.Disk. """ # CIM_DiskDrive does not have disk size information. 
# We have to find out the Primordial CIM_StorageExtent for that. cim_ext = _pri_cim_ext_of_cim_disk( smis_common, cim_disk.path, property_list=['BlockSize', 'NumberOfBlocks']) status = _disk_status_of_cim_disk(cim_disk) if smis_common.profile_check(SmisCommon.SNIA_SPARE_DISK_PROFILE, SmisCommon.SMIS_SPEC_VER_1_4, raise_error=False): cim_srss = smis_common.AssociatorNames( cim_ext.path, AssocClass='CIM_IsSpare', ResultClass='CIM_StorageRedundancySet') if len(cim_srss) >= 1: status |= Disk.STATUS_SPARE_DISK if 'EMCInUse' in cim_disk.keys() and cim_disk['EMCInUse'] is False: status |= Disk.STATUS_FREE name = '' block_size = Disk.BLOCK_SIZE_NOT_FOUND num_of_block = Disk.BLOCK_COUNT_NOT_FOUND disk_type = Disk.TYPE_UNKNOWN sys_id = sys_id_of_cim_disk(cim_disk) # These are mandatory # we do not check whether they follow the SNIA standard. if 'Name' in cim_disk: name = cim_disk["Name"] if 'BlockSize' in cim_ext: block_size = cim_ext['BlockSize'] if 'NumberOfBlocks' in cim_ext: num_of_block = cim_ext['NumberOfBlocks'] if smis_common.is_megaraid(): disk_type = _disk_type_megaraid(cim_disk) else: # SNIA SMI-S 1.4 or even 1.6 does not define anyway to find out disk # type. # Currently, EMC is following DMTF define to do so. if 'InterconnectType' in cim_disk: # DMTF 2.31 CIM_DiskDrive disk_type = cim_disk['InterconnectType'] if 'Caption' in cim_disk: # EMC VNX introduced NL_SAS disk. if cim_disk['Caption'] == 'NL_SAS': disk_type = Disk.TYPE_NL_SAS if disk_type == Disk.TYPE_UNKNOWN and 'DiskType' in cim_disk: disk_type = _dmtf_disk_type_2_lsm_disk_type(cim_disk['DiskType']) disk_id = _disk_id_of_cim_disk(cim_disk) return Disk(disk_id, name, disk_type, block_size, num_of_block, status, sys_id) libstoragemgmt-1.2.3/plugin/smispy/smis.py0000664000175000017500000023603712537737032015653 00000000000000# Copyright (C) 2011-2014 Red Hat, Inc. 
# This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: tasleson # Gris Ge from string import split import time import copy import os import re import pywbem from pywbem import CIMError import smis_cap import smis_sys import smis_pool import smis_disk from lsm.plugin.smispy import smis_vol from lsm.plugin.smispy import smis_ag import dmtf from lsm import (IStorageAreaNetwork, uri_parse, LsmError, ErrorNumber, JobStatus, md5, Volume, AccessGroup, Pool, VERSION, TargetPort, search_property) from utils import (merge_list, handle_cim_errors, hex_string_format) from smis_common import SmisCommon ## Variable Naming scheme: # cim_xxx CIMInstance # cim_xxx_path CIMInstanceName # cim_sys CIM_ComputerSystem (root or leaf) # cim_pool CIM_StoragePool # cim_scs CIM_StorageConfigurationService # cim_vol CIM_StorageVolume # cim_rp CIM_RegisteredProfile # cim_init CIM_StorageHardwareID # cim_spc CIM_SCSIProtocolController # cim_init_mg CIM_InitiatorMaskingGroup # cim_fc_tgt CIM_FCPort # cim_iscsi_pg CIM_iSCSIProtocolEndpoint # iSCSI portal group # cim_iscsi_node CIM_SCSIProtocolController # cim_tcp CIM_TCPProtocolEndpoint, # cim_ip CIM_IPProtocolEndpoint # cim_eth CIM_EthernetPort # cim_pe CIM_SCSIProtocolEndpoint # cim_gmms CIM_GroupMaskingMappingService # cim_ccs CIM_ControllerConfigurationService # cim_rs CIM_ReplicationService # cim_hwms CIM_StorageHardwareIDManagementService # # sys Object of LSM System # pool Object of 
LSM Pool # vol Object of LSM Volume ## Method Naming scheme: # _cim_xxx() # Return CIMInstance without any Associations() call. # _cim_xxx_of(cim_yyy) # Return CIMInstance associated to cim_yyy # _adj_cim_xxx() # Return CIMInstance with 'adj' only # _cim_xxx_of_id(some_id) # Return CIMInstance for given ID # Terminology # SPC CIM_SCSIProtocolController # BSP SNIA SMI-S 'Block Services Package' profile # Group M&M SNIA SMI-S 'Group Masking and Mapping' profile def _lsm_tgt_port_type_of_cim_fc_tgt(cim_fc_tgt): """ We are assuming we got CIM_FCPort. Caller should make sure of that. Return TargetPool.PORT_TYPE_FC as fallback """ # In SNIA SMI-S 1.6.1 public draft 2, 'PortDiscriminator' is mandatory # for FCoE target port. if 'PortDiscriminator' in cim_fc_tgt and \ cim_fc_tgt['PortDiscriminator'] and \ dmtf.FC_PORT_PORT_DISCRIMINATOR_FCOE in cim_fc_tgt['PortDiscriminator']: return TargetPort.TYPE_FCOE if 'LinkTechnology' in cim_fc_tgt and \ cim_fc_tgt['LinkTechnology'] == dmtf.NET_PORT_LINK_TECH_ETHERNET: return TargetPort.TYPE_FCOE return TargetPort.TYPE_FC class Smis(IStorageAreaNetwork): """ SMI-S plug-ing which exposes a small subset of the overall provided functionality of SMI-S """ _JOB_ERROR_HANDLER = { SmisCommon.JOB_RETRIEVE_VOLUME_CREATE: smis_vol.volume_create_error_handler, } def __init__(self): self._c = None self.tmo = 0 @handle_cim_errors def plugin_register(self, uri, password, timeout, flags=0): """ Called when the plug-in runner gets the start request from the client. Checkout interop support status via: 1. Enumerate CIM_RegisteredProfile in 'interop' namespace. 2. if nothing found, then Enumerate CIM_RegisteredProfile in 'root/interop' namespace. 3. if nothing found, then Enumerate CIM_RegisteredProfile in user defined namespace. 
""" protocol = 'http' port = SmisCommon.IAAN_WBEM_HTTP_PORT u = uri_parse(uri, ['scheme', 'netloc', 'host'], None) if u['scheme'].lower() == 'smispy+ssl': protocol = 'https' port = SmisCommon.IAAN_WBEM_HTTPS_PORT if 'port' in u: port = u['port'] url = "%s://%s:%s" % (protocol, u['host'], port) # System filtering system_list = None if 'systems' in u['parameters']: system_list = split(u['parameters']["systems"], ":") namespace = None if 'namespace' in u['parameters']: namespace = u['parameters']['namespace'] no_ssl_verify = False if "no_ssl_verify" in u["parameters"] \ and u["parameters"]["no_ssl_verify"] == 'yes': no_ssl_verify = True debug_path = None if 'debug_path' in u['parameters']: debug_path = u['parameters']['debug_path'] self._c = SmisCommon( url, u['username'], password, namespace, no_ssl_verify, debug_path, system_list) self.tmo = timeout @handle_cim_errors def time_out_set(self, ms, flags=0): self.tmo = ms @handle_cim_errors def time_out_get(self, flags=0): return self.tmo @handle_cim_errors def plugin_unregister(self, flags=0): self._c = None @handle_cim_errors def capabilities(self, system, flags=0): cim_sys = smis_sys.cim_sys_of_sys_id(self._c, system.id) return smis_cap.get(self._c, cim_sys, system) @handle_cim_errors def plugin_info(self, flags=0): return "Generic SMI-S support", VERSION @handle_cim_errors def job_status(self, job_id, flags=0): """ Given a job id returns the current status as a tuple (status (enum), percent_complete(integer), volume (None or Volume)) """ completed_item = None error_handler = None (ignore, retrieve_data, method_data) = SmisCommon.parse_job_id(job_id) if retrieve_data in Smis._JOB_ERROR_HANDLER.keys(): error_handler = Smis._JOB_ERROR_HANDLER[retrieve_data] cim_job_pros = SmisCommon.cim_job_pros() cim_job_pros.extend( ['JobState', 'PercentComplete', 'ErrorDescription', 'OperationalStatus']) cim_job = self._c.cim_job_of_job_id(job_id, cim_job_pros) job_state = cim_job['JobState'] try: if job_state in 
(dmtf.JOB_STATE_NEW, dmtf.JOB_STATE_STARTING, dmtf.JOB_STATE_RUNNING): status = JobStatus.INPROGRESS pc = cim_job['PercentComplete'] if pc > 100: percent_complete = 100 else: percent_complete = pc elif job_state == dmtf.JOB_STATE_COMPLETED: status = JobStatus.COMPLETE percent_complete = 100 if SmisCommon.cim_job_completed_ok(cim_job): if retrieve_data == SmisCommon.JOB_RETRIEVE_VOLUME or \ retrieve_data == SmisCommon.JOB_RETRIEVE_VOLUME_CREATE: completed_item = self._new_vol_from_job(cim_job) else: raise LsmError( ErrorNumber.PLUGIN_BUG, str(cim_job['ErrorDescription'])) else: raise LsmError( ErrorNumber.PLUGIN_BUG, str(cim_job['ErrorDescription'])) except Exception: if error_handler is not None: error_handler(self._c, method_data) else: raise return status, percent_complete, completed_item def _new_vol_from_name(self, out): """ Given a volume by CIMInstanceName, return a lsm Volume object """ cim_vol = None cim_vol_pros = smis_vol.cim_vol_pros() if 'TheElement' in out: cim_vol = self._c.GetInstance( out['TheElement'], PropertyList=cim_vol_pros) elif 'TargetElement' in out: cim_vol = self._c.GetInstance( out['TargetElement'], PropertyList=cim_vol_pros) pool_id = smis_pool.pool_id_of_cim_vol(self._c, cim_vol.path) sys_id = smis_sys.sys_id_of_cim_vol(cim_vol) return smis_vol.cim_vol_to_lsm_vol(cim_vol, pool_id, sys_id) def _new_vol_from_job(self, job): """ Given a concrete job instance, return referenced volume as lsm volume """ cim_vol_pros = smis_vol.cim_vol_pros() cim_vols = [] # Workaround for HP 3PAR: # When doing volume-replicate for 'COPY" type, Associators() will # return [None] if PropertyList defined. It works well # for CLONE type. 
if job.path.classname == 'TPD_ConcreteJob': cim_vols = self._c.Associators( job.path, AssocClass='CIM_AffectedJobElement', ResultClass='CIM_StorageVolume') else: cim_vols = self._c.Associators( job.path, AssocClass='CIM_AffectedJobElement', ResultClass='CIM_StorageVolume', PropertyList=cim_vol_pros) for cim_vol in cim_vols: pool_id = smis_pool.pool_id_of_cim_vol(self._c, cim_vol.path) sys_id = smis_sys.sys_id_of_cim_vol(cim_vol) return smis_vol.cim_vol_to_lsm_vol(cim_vol, pool_id, sys_id) return None @handle_cim_errors def volumes(self, search_key=None, search_value=None, flags=0): """ Return all volumes. We are basing on "Block Services Package" profile version 1.4 or later: CIM_ComputerSystem | | (CIM_HostedStoragePool) | v CIM_StoragePool | | (CIM_AllocatedFromStoragePool) | v CIM_StorageVolume As 'Block Services Package' is mandatory for 'Array' profile, we don't check support status here as startup() already checked 'Array' profile. """ rc = [] cim_sys_pros = smis_sys.cim_sys_id_pros() cim_syss = smis_sys.root_cim_sys(self._c, cim_sys_pros) cim_vol_pros = smis_vol.cim_vol_pros() for cim_sys in cim_syss: sys_id = smis_sys.sys_id_of_cim_sys(cim_sys) pool_pros = smis_pool.cim_pool_id_pros() cim_pools = smis_pool.cim_pools_of_cim_sys_path( self._c, cim_sys.path, pool_pros) for cim_pool in cim_pools: pool_id = smis_pool.pool_id_of_cim_pool(cim_pool) cim_vols = smis_vol.cim_vol_of_cim_pool_path( self._c, cim_pool.path, cim_vol_pros) for cim_vol in cim_vols: rc.append( smis_vol.cim_vol_to_lsm_vol(cim_vol, pool_id, sys_id)) return search_property(rc, search_key, search_value) @handle_cim_errors def pools(self, search_key=None, search_value=None, flags=0): """ Convert CIM_StoragePool to lsm.Pool. To list all CIM_StoragePool: 1. List all root CIM_ComputerSystem. 2. List all CIM_StoragePool associated to CIM_ComputerSystem. 
""" rc = [] cim_pool_pros = smis_pool.cim_pool_pros() cim_sys_pros = smis_sys.cim_sys_id_pros() cim_syss = smis_sys.root_cim_sys(self._c, cim_sys_pros) for cim_sys in cim_syss: system_id = smis_sys.sys_id_of_cim_sys(cim_sys) cim_pools = smis_pool.cim_pools_of_cim_sys_path( self._c, cim_sys.path, cim_pool_pros) for cim_pool in cim_pools: rc.append( smis_pool.cim_pool_to_lsm_pool( self._c, cim_pool, system_id)) return search_property(rc, search_key, search_value) @handle_cim_errors def systems(self, flags=0): """ Return the storage arrays accessible from this plug-in at this time As 'Block Services Package' is mandatory for 'Array' profile, we don't check support status here as startup() already checked 'Array' profile. """ cim_sys_pros = smis_sys.cim_sys_pros() cim_syss = smis_sys.root_cim_sys(self._c, cim_sys_pros) return [smis_sys.cim_sys_to_lsm_sys(s) for s in cim_syss] @handle_cim_errors def volume_create(self, pool, volume_name, size_bytes, provisioning, flags=0): """ Create a volume. """ # Use user provide lsm.Pool.element_type to speed thing up. if not Pool.ELEMENT_TYPE_VOLUME & pool.element_type: raise LsmError( ErrorNumber.NO_SUPPORT, "Pool not suitable for creating volumes") # Use THICK volume by default unless unsupported or user requested. dmtf_element_type = dmtf.ELEMENT_THICK_VOLUME if provisioning == Volume.PROVISION_DEFAULT: # Prefer thick/full volume unless only thin volume supported. # HDS AMS only support thin volume in their thin pool. 
if not Pool.ELEMENT_TYPE_VOLUME_FULL & pool.element_type and \ Pool.ELEMENT_TYPE_VOLUME_THIN & pool.element_type: dmtf_element_type = dmtf.ELEMENT_THIN_VOLUME else: # User is requesting certain type of volume if provisioning == Volume.PROVISION_FULL and \ Pool.ELEMENT_TYPE_VOLUME_FULL & pool.element_type: dmtf_element_type = dmtf.ELEMENT_THICK_VOLUME elif (provisioning == Volume.PROVISION_THIN and Pool.ELEMENT_TYPE_VOLUME_THIN & pool.element_type): dmtf_element_type = dmtf.ELEMENT_THIN_VOLUME else: raise LsmError( ErrorNumber.NO_SUPPORT, "Pool not suitable for creating volume with " "requested provisioning type") # Get the Configuration service for the system we are interested in. cim_scs = self._c.cim_scs_of_sys_id(pool.system_id) cim_pool_path = smis_pool.lsm_pool_to_cim_pool_path( self._c, pool) in_params = {'ElementName': volume_name, 'ElementType': dmtf_element_type, 'InPool': cim_pool_path, 'Size': pywbem.Uint64(size_bytes)} error_handler = Smis._JOB_ERROR_HANDLER[ SmisCommon.JOB_RETRIEVE_VOLUME_CREATE] return self._c.invoke_method( 'CreateOrModifyElementFromStoragePool', cim_scs.path, in_params, out_handler=self._new_vol_from_name, error_handler=error_handler, retrieve_data=SmisCommon.JOB_RETRIEVE_VOLUME_CREATE, method_data=volume_name) def _detach_netapp_e(self, vol, sync): #Get the Configuration service for the system we are interested in. 
cim_scs = self._c.cim_scs_of_sys_id(vol.system_id) in_params = {'Operation': pywbem.Uint16(2), 'Synchronization': sync.path} self._c.invoke_method_wait( 'ModifySynchronization', cim_scs.path, in_params) def _detach(self, vol, sync): if self._c.is_netappe(): return self._detach_netapp_e(vol, sync) cim_rs = self._c.cim_rs_of_sys_id(vol.system_id, raise_error=False) if cim_rs: in_params = {'Operation': pywbem.Uint16(8), 'Synchronization': sync.path} self._c.invoke_method_wait( 'ModifyReplicaSynchronization', cim_rs.path, in_params) @staticmethod def _cim_name_match(a, b): if a['DeviceID'] == b['DeviceID'] \ and a['SystemName'] == b['SystemName'] \ and a['SystemCreationClassName'] == \ b['SystemCreationClassName']: return True else: return False def _deal_volume_associations_netappe(self, vol, cim_vol_path): """ Check a volume to see if it has any associations with other volumes. """ rc = False ss = self._c.References(cim_vol_path, ResultClass='CIM_StorageSynchronized') if len(ss): for s in ss: if 'SyncedElement' in s: item = s['SyncedElement'] if Smis._cim_name_match(item, cim_vol_path): self._detach(vol, s) rc = True if 'SystemElement' in s: item = s['SystemElement'] if Smis._cim_name_match(item, cim_vol_path): self._detach(vol, s) rc = True return rc def _deal_volume_associations(self, vol, cim_vol_path): """ Check a volume to see if it has any associations with other volumes and deal with them. """ if self._c.is_netappe(): return self._deal_volume_associations_netappe(vol, cim_vol_path) try: ss = self._c.References(cim_vol_path, ResultClass='CIM_StorageSynchronized') except pywbem.CIMError as e: if e[0] == pywbem.CIM_ERR_INVALID_CLASS: return else: raise if len(ss): for s in ss: # TODO: Need to see if detach is a supported operation in # replication capabilities. # # TODO: Theory of delete. Some arrays will automatically # detach a clone, check # ReplicationServiceCapabilities.GetSupportedFeatures() and # look for "Synchronized clone target detaches automatically". 
# If not automatic then detach manually. However, we have # seen arrays that don't report detach automatically that # don't need a detach. # # This code needs to be re-investigated to work with a wide # range of array vendors. if 'SyncState' in s and 'CopyType' in s: if s['SyncState'] == dmtf.ST_SYNC_STATE_SYNCHRONIZED and \ s['CopyType'] != \ dmtf.ST_CONF_CAP_COPY_TYPE_UNSYNC_ASSOC: if 'SyncedElement' in s: item = s['SyncedElement'] if Smis._cim_name_match(item, cim_vol_path): self._detach(vol, s) if 'SystemElement' in s: item = s['SystemElement'] if Smis._cim_name_match(item, cim_vol_path): self._detach(vol, s) def _volume_delete_netapp_e(self, volume, flags=0): cim_scs = self._c.cim_scs_of_sys_id(volume.system_id) cim_vol_path = smis_vol.lsm_vol_to_cim_vol_path(self._c, volume) #If we actually have an association to delete, the volume will be #deleted with the association, no need to call ReturnToStoragePool if not self._deal_volume_associations(volume, cim_vol_path): in_params = {'TheElement': cim_vol_path} #Delete returns None or Job number return self._c.invoke_method( 'ReturnToStoragePool', cim_scs.path, in_params)[0] #Loop to check to see if volume is actually gone yet! 
try: cim_vol = self._c.GetInstance(cim_vol_path, PropertyList=[]) while cim_vol is not None: cim_vol = self._c.GetInstance(cim_vol_path, PropertyList=[]) time.sleep(0.125) except (LsmError, CIMError) as e: pass @handle_cim_errors def volume_delete(self, volume, flags=0): """ Delete a volume """ cim_scs = self._c.cim_scs_of_sys_id(volume.system_id) cim_vol_path = smis_vol.lsm_vol_to_cim_vol_path(self._c, volume) self._deal_volume_associations(volume, cim_vol_path) in_params = {'TheElement': cim_vol_path} # Delete returns None or Job number return self._c.invoke_method( 'ReturnToStoragePool', cim_scs.path, in_params)[0] @handle_cim_errors def volume_resize(self, volume, new_size_bytes, flags=0): """ Re-size a volume """ cim_scs = self._c.cim_scs_of_sys_id(volume.system_id) cim_vol_path = smis_vol.lsm_vol_to_cim_vol_path(self._c, volume) in_params = {'ElementType': pywbem.Uint16(2), 'TheElement': cim_vol_path, 'Size': pywbem.Uint64(new_size_bytes)} return self._c.invoke_method( 'CreateOrModifyElementFromStoragePool', cim_scs.path, in_params, out_handler=self._new_vol_from_name, retrieve_data=SmisCommon.JOB_RETRIEVE_VOLUME) def _get_supported_sync_and_mode(self, system_id, rep_type): """ Converts from a library capability to a suitable array capability returns a tuple (sync, mode) """ rc = [None, None] cim_rs = self._c.cim_rs_of_sys_id(system_id, raise_error=False) if cim_rs: rs_cap = self._c.Associators( cim_rs.path, AssocClass='CIM_ElementCapabilities', ResultClass='CIM_ReplicationServiceCapabilities')[0] s_rt = rs_cap['SupportedReplicationTypes'] if rep_type == Volume.REPLICATE_COPY: if dmtf.REPLICA_CAP_TYPE_SYNC_CLONE_LOCAL in s_rt: rc[0] = dmtf.SYNC_TYPE_CLONE rc[1] = dmtf.REPLICA_MODE_SYNC elif dmtf.REPLICA_CAP_TYPE_ASYNC_CLONE_LOCAL in s_rt: rc[0] = dmtf.SYNC_TYPE_CLONE rc[1] = dmtf.REPLICA_MODE_ASYNC elif rep_type == Volume.REPLICATE_MIRROR_ASYNC: if dmtf.REPLICA_CAP_TYPE_ASYNC_MIRROR_LOCAL in s_rt: rc[0] = dmtf.SYNC_TYPE_MIRROR rc[1] = dmtf.REPLICA_MODE_ASYNC 
elif rep_type == Volume.REPLICATE_MIRROR_SYNC: if dmtf.REPLICA_CAP_TYPE_SYNC_MIRROR_LOCAL in s_rt: rc[0] = dmtf.SYNC_TYPE_MIRROR rc[1] = dmtf.REPLICA_MODE_SYNC elif rep_type == Volume.REPLICATE_CLONE: if dmtf.REPLICA_CAP_TYPE_SYNC_CLONE_LOCAL in s_rt: rc[0] = dmtf.SYNC_TYPE_SNAPSHOT rc[1] = dmtf.REPLICA_MODE_SYNC elif dmtf.REPLICA_CAP_TYPE_ASYNC_CLONE_LOCAL in s_rt: rc[0] = dmtf.SYNC_TYPE_SNAPSHOT rc[1] = dmtf.REPLICA_MODE_ASYNC if rc[0] is None: raise LsmError(ErrorNumber.NO_SUPPORT, "Replication type not supported") return tuple(rc) @handle_cim_errors def volume_replicate(self, pool, rep_type, volume_src, name, flags=0): """ Replicate a volume """ if rep_type == Volume.REPLICATE_MIRROR_ASYNC \ or rep_type == Volume.REPLICATE_MIRROR_SYNC: raise LsmError(ErrorNumber.NO_SUPPORT, "Mirroring not supported") cim_rs = self._c.cim_rs_of_sys_id( volume_src.system_id, raise_error=False) # Some (EMC VMAX, Dot hill) SMI-S Provider allow duplicated # ElementName, we have to do pre-check here. if smis_vol.volume_name_exists(self._c, name): raise LsmError(ErrorNumber.NAME_CONFLICT, "Volume with name '%s' already exists!" 
% name) cim_pool_path = None if pool is not None: cim_pool_path = smis_pool.lsm_pool_to_cim_pool_path(self._c, pool) src_cim_vol_path = smis_vol.lsm_vol_to_cim_vol_path( self._c, volume_src) if cim_rs: method = 'CreateElementReplica' sync, mode = self._get_supported_sync_and_mode( volume_src.system_id, rep_type) in_params = {'ElementName': name, 'SyncType': sync, #'Mode': mode, 'SourceElement': src_cim_vol_path, 'WaitForCopyState': dmtf.COPY_STATE_SYNC} else: # Check for older support via storage configuration service method = 'CreateReplica' # Check for storage configuration service cim_rs = self._c.cim_scs_of_sys_id( volume_src.system_id, raise_error=False) ct = Volume.REPLICATE_CLONE if rep_type == Volume.REPLICATE_CLONE: ct = dmtf.ST_CONF_CAP_COPY_TYPE_UNSYNC_ASSOC elif rep_type == Volume.REPLICATE_COPY: ct = dmtf.ST_CONF_CAP_COPY_TYPE_UNSYNC_UNASSOC elif rep_type == Volume.REPLICATE_MIRROR_ASYNC: ct = dmtf.ST_CONF_CAP_COPY_TYPE_ASYNC elif rep_type == Volume.REPLICATE_MIRROR_SYNC: ct = dmtf.ST_CONF_CAP_COPY_TYPE_SYNC in_params = {'ElementName': name, 'CopyType': ct, 'SourceElement': src_cim_vol_path} if cim_rs: if cim_pool_path is not None: in_params['TargetPool'] = cim_pool_path return self._c.invoke_method( method, cim_rs.path, in_params, out_handler=self._new_vol_from_name, retrieve_data=SmisCommon.JOB_RETRIEVE_VOLUME) raise LsmError(ErrorNumber.NO_SUPPORT, "volume-replicate not supported") def _cim_dev_mg_path_create(self, cim_gmms_path, name, cim_vol_path, vol_id): rc = SmisCommon.SNIA_INVOKE_FAILED out = None in_params = { 'GroupName': name, 'Members': [cim_vol_path], 'Type': dmtf.MASK_GROUP_TYPE_DEV} cim_dev_mg_path = None try: cim_dev_mg_path = self._c.invoke_method_wait( 'CreateGroup', cim_gmms_path, in_params, out_key='MaskingGroup', expect_class='CIM_TargetMaskingGroup') except (LsmError, CIMError): cim_dev_mg_path = self._check_exist_cim_dev_mg( name, cim_gmms_path, cim_vol_path, vol_id) if cim_dev_mg_path is None: raise return cim_dev_mg_path def 
_cim_tgt_mg_path_create(self, cim_sys_path, cim_gmms_path, name, init_type): """ Create CIM_TargetMaskingGroup Currently, LSM does not support target ports masking we will mask to all target ports. Return CIMInstanceName of CIM_TargetMaskingGroup """ rc = SmisCommon.SNIA_INVOKE_FAILED out = None in_params = { 'GroupName': name, 'Type': dmtf.MASK_GROUP_TYPE_TGT} if init_type == AccessGroup.INIT_TYPE_WWPN: cim_fc_tgts = self._cim_fc_tgt_of(cim_sys_path) all_cim_fc_peps_path = [] all_cim_fc_peps_path.extend( [self._cim_pep_path_of_fc_tgt(x.path) for x in cim_fc_tgts]) in_params['Members'] = all_cim_fc_peps_path elif init_type == AccessGroup.INIT_TYPE_ISCSI_IQN: cim_iscsi_pgs = self._cim_iscsi_pg_of(cim_sys_path) in_params['Members'] = [x.path for x in cim_iscsi_pgs] else: # Already checked at the beginning of this method pass cim_tgt_mg_path = None try: cim_tgt_mg_path = self._c.invoke_method_wait( 'CreateGroup', cim_gmms_path, in_params, out_key='MaskingGroup', expect_class='CIM_TargetMaskingGroup') except (LsmError, CIMError): cim_tgt_mg_path = self._check_exist_cim_tgt_mg(name) if cim_tgt_mg_path is None: raise return cim_tgt_mg_path def _cim_spc_path_create(self, cim_gmms_path, cim_init_mg_path, cim_tgt_mg_path, cim_dev_mg_path, name): in_params = { 'ElementName': name, 'InitiatorMaskingGroup': cim_init_mg_path, 'TargetMaskingGroup': cim_tgt_mg_path, 'DeviceMaskingGroup': cim_dev_mg_path, } return self._c.invoke_method_wait( 'CreateMaskingView', cim_gmms_path, in_params, out_key='ProtocolController', expect_class='CIM_SCSIProtocolController') def _volume_mask_group(self, access_group, volume, flags=0): """ Grant access to a volume to an group Use GroupMaskingMappingService.AddMembers() for Group Masking Use ControllerConfigurationService.ExposePaths() for Masking. Currently, LSM does not have a way to control which target port to mask. If CIM_TargetMaskingGroup already defined for current CIM_InitiatorMaskingGroup, we use that. 
If No CIM_TargetMaskingGroup exist, we create one with all possible target ports(all FC and FCoE port for access_group.init_type == WWPN, and the same to iSCSI) """ cim_init_mg_path = smis_ag.lsm_ag_to_cim_init_mg_path( self._c, access_group) cim_inits = smis_ag.cim_init_of_cim_init_mg_path( self._c, cim_init_mg_path) if len(cim_inits) == 0: raise LsmError(ErrorNumber.EMPTY_ACCESS_GROUP, "Access group %s is empty(no member), " % access_group.id + "will not do volume_mask()") if access_group.init_type != AccessGroup.INIT_TYPE_WWPN and \ access_group.init_type != AccessGroup.INIT_TYPE_ISCSI_IQN: raise LsmError(ErrorNumber.NO_SUPPORT, "SMI-S plugin only support iSCSI and FC/FCoE " "access group volume masking, but got " "access group init_type: %d" % access_group.init_type) cim_vol_path = smis_vol.lsm_vol_to_cim_vol_path(self._c, volume) cim_gmms = self._c.cim_gmms_of_sys_id(access_group.system_id) cim_spcs_path = self._c.AssociatorNames( cim_init_mg_path, AssocClass='CIM_AssociatedInitiatorMaskingGroup', ResultClass='CIM_SCSIProtocolController') if len(cim_spcs_path) == 0: # We have to create the SPC and dev_mg now. cim_sys = smis_sys.cim_sys_of_sys_id( self._c, access_group.system_id) cim_tgt_mg_path = self._cim_tgt_mg_path_create( cim_sys.path, cim_gmms.path, access_group.name, access_group.init_type) cim_dev_mg_path = self._cim_dev_mg_path_create( cim_gmms.path, access_group.name, cim_vol_path, volume.id) # Done when SPC created. self._cim_spc_path_create( cim_gmms.path, cim_init_mg_path, cim_tgt_mg_path, cim_dev_mg_path, access_group.name) else: # CIM_InitiatorMaskingGroup might have multiple SPC when having # many tgt_mg. It's seldom use, but possible. 
            for cim_spc_path in cim_spcs_path:
                # Check whether already masked
                cim_vols = smis_ag.cim_vols_masked_to_cim_spc_path(
                    self._c, cim_spc_path, smis_vol.cim_vol_id_pros())
                for cur_cim_vol in cim_vols:
                    if smis_vol.vol_id_of_cim_vol(cur_cim_vol) == volume.id:
                        raise LsmError(
                            ErrorNumber.NO_STATE_CHANGE,
                            "Volume already masked to requested access group")
                # SNIA require each cim_spc only have one cim_dev_mg
                # associated
                cim_dev_mg_path = self._c.AssociatorNames(
                    cim_spc_path,
                    AssocClass='CIM_AssociatedDeviceMaskingGroup',
                    ResultClass='CIM_DeviceMaskingGroup')[0]
                in_params = {
                    'MaskingGroup': cim_dev_mg_path,
                    'Members': [cim_vol_path],
                }
                self._c.invoke_method_wait(
                    'AddMembers', cim_gmms.path, in_params)
        return None

    @handle_cim_errors
    def volume_mask(self, access_group, volume, flags=0):
        """
        Grant access to a volume to an group
        Dispatch to group-masking or legacy ExposePaths style masking
        depending on provider capability.
        """
        mask_type = smis_cap.mask_type(self._c, raise_error=True)
        # Workaround for EMC VNX/CX
        if mask_type == smis_cap.MASK_TYPE_GROUP:
            cim_sys = smis_sys.cim_sys_of_sys_id(self._c, volume.system_id)
            if cim_sys.path.classname == 'Clar_StorageSystem':
                mask_type = smis_cap.MASK_TYPE_MASK
        if mask_type == smis_cap.MASK_TYPE_GROUP:
            return self._volume_mask_group(access_group, volume, flags)
        return self._volume_mask_old(access_group, volume, flags)

    def _cim_vol_masked_to_spc(self, cim_spc_path, vol_id,
                               property_list=None):
        """
        Check whether provided volume id is masked to cim_spc_path.
        If so, return cim_vol, or return None
        """
        if property_list is None:
            property_list = smis_vol.cim_vol_id_pros()
        else:
            # Make sure the volume-id properties are always retrieved.
            property_list = merge_list(
                property_list, smis_vol.cim_vol_id_pros())
        masked_cim_vols = smis_ag.cim_vols_masked_to_cim_spc_path(
            self._c, cim_spc_path, property_list)
        for masked_cim_vol in masked_cim_vols:
            if smis_vol.vol_id_of_cim_vol(masked_cim_vol) == vol_id:
                return masked_cim_vol
        return None

    def _volume_mask_old(self, access_group, volume, flags):
        # Legacy (non group-masking) path: expose the volume via
        # ControllerConfigurationService.ExposePaths().
        cim_spc_path = smis_ag.lsm_ag_to_cim_spc_path(self._c, access_group)
        cim_inits = smis_ag.cim_init_of_cim_spc_path(self._c, cim_spc_path)
        if len(cim_inits) == 0:
            raise LsmError(ErrorNumber.EMPTY_ACCESS_GROUP,
                           "Access group %s is empty(no member), "
                           % access_group.id +
                           "will not do volume_mask()")
        # Pre-Check: Already masked
        if self._cim_vol_masked_to_spc(cim_spc_path, volume.id):
            raise LsmError(
                ErrorNumber.NO_STATE_CHANGE,
                "Volume already masked to requested access group")
        cim_ccs = self._c.cim_ccs_of_sys_id(volume.system_id)
        cim_vol_path = smis_vol.lsm_vol_to_cim_vol_path(self._c, volume)
        cim_vol = self._c.GetInstance(cim_vol_path, PropertyList=['Name'])
        in_params = {'LUNames': [cim_vol['Name']],
                     'ProtocolControllers': [cim_spc_path],
                     'DeviceAccesses': [dmtf.CTRL_CONF_SRV_DA_RW]}
        self._c.invoke_method_wait('ExposePaths', cim_ccs.path, in_params)
        return None

    def _volume_unmask_group(self, access_group, volume):
        """
        Use CIM_GroupMaskingMappingService.RemoveMembers() against
        CIM_DeviceMaskingGroup
        If SupportedDeviceGroupFeatures does not allow empty
        DeviceMaskingGroup in SPC, we remove SPC and DeviceMaskingGroup.
        """
        cim_sys = smis_sys.cim_sys_of_sys_id(self._c, volume.system_id)
        cim_gmms_cap = self._c.Associators(
            cim_sys.path,
            AssocClass='CIM_ElementCapabilities',
            ResultClass='CIM_GroupMaskingMappingCapabilities',
            PropertyList=['SupportedDeviceGroupFeatures',
                          'SupportedSynchronousActions',
                          'SupportedAsynchronousActions'])[0]
        flag_empty_dev_in_spc = False
        if dmtf.GMM_CAP_DEV_MG_ALLOW_EMPTY_W_SPC in \
           cim_gmms_cap['SupportedDeviceGroupFeatures']:
            flag_empty_dev_in_spc = True
        if flag_empty_dev_in_spc is False:
            # Provider must at least support DeleteMaskingView so we can
            # tear down the SPC when the last volume leaves the group.
            if ((dmtf.GMM_CAP_DELETE_SPC not in
                 cim_gmms_cap['SupportedSynchronousActions']) and
                (dmtf.GMM_CAP_DELETE_SPC not in
                 cim_gmms_cap['SupportedAsynchronousActions'])):
                raise LsmError(
                    ErrorNumber.NO_SUPPORT,
                    "volume_unmask() not supported. It requires one of these "
                    "1. support of DeleteMaskingView(). 2. allowing empty "
                    "DeviceMaskingGroup in SPC. But target SMI-S provider "
                    "does not support any of these")
        cim_vol_path = smis_vol.lsm_vol_to_cim_vol_path(self._c, volume)
        # SPCs this volume is currently exposed through.
        vol_cim_spcs_path = self._c.AssociatorNames(
            cim_vol_path,
            AssocClass='CIM_ProtocolControllerForUnit',
            ResultClass='CIM_SCSIProtocolController')
        if len(vol_cim_spcs_path) == 0:
            # Already unmasked
            raise LsmError(
                ErrorNumber.NO_STATE_CHANGE,
                "Volume is not masked to requested access group")
        cim_init_mg_path = smis_ag.lsm_ag_to_cim_init_mg_path(
            self._c, access_group)
        ag_cim_spcs_path = self._c.AssociatorNames(
            cim_init_mg_path,
            AssocClass='CIM_AssociatedInitiatorMaskingGroup',
            ResultClass='CIM_SCSIProtocolController')
        # Find the SPC shared by both the volume and the access group.
        found_cim_spc_path = None
        for ag_cim_spc_path in ag_cim_spcs_path:
            for vol_cim_spc_path in vol_cim_spcs_path:
                if vol_cim_spc_path == ag_cim_spc_path:
                    found_cim_spc_path = vol_cim_spc_path
                    break
        if found_cim_spc_path is None:
            # Already unmasked
            raise LsmError(
                ErrorNumber.NO_STATE_CHANGE,
                "Volume is not masked to requested access group")
        # SNIA require each cim_spc only have one cim_dev_mg associated.
        cim_dev_mg_path = self._c.AssociatorNames(
            found_cim_spc_path,
            AssocClass='CIM_AssociatedDeviceMaskingGroup',
            ResultClass='CIM_DeviceMaskingGroup')[0]
        cim_gmms = self._c.cim_gmms_of_sys_id(volume.system_id)
        if flag_empty_dev_in_spc is False:
            # We have to check whether this volume is the last
            # one in the DeviceMaskingGroup, if so, we have to
            # delete the SPC
            cur_cim_vols_path = self._c.AssociatorNames(
                cim_dev_mg_path,
                AssocClass='CIM_OrderedMemberOfCollection',
                ResultClass='CIM_StorageVolume')
            if len(cur_cim_vols_path) == 1:
                # last volume, should delete SPC
                in_params = {
                    'ProtocolController': found_cim_spc_path,
                }
                self._c.invoke_method_wait(
                    'DeleteMaskingView', cim_gmms.path, in_params)
        in_params = {
            'MaskingGroup': cim_dev_mg_path,
            'Members': [cim_vol_path],
        }
        self._c.invoke_method_wait(
            'RemoveMembers', cim_gmms.path, in_params)
        return None

    @handle_cim_errors
    def volume_unmask(self, access_group, volume, flags=0):
        # Dispatch to group-masking or legacy HidePaths style unmask
        # depending on provider capability.
        mask_type = smis_cap.mask_type(self._c, raise_error=True)
        # Workaround for EMC VNX/CX
        if mask_type == smis_cap.MASK_TYPE_GROUP:
            cim_sys = smis_sys.cim_sys_of_sys_id(self._c, volume.system_id)
            if cim_sys.path.classname == 'Clar_StorageSystem':
                mask_type = smis_cap.MASK_TYPE_MASK
        if mask_type == smis_cap.MASK_TYPE_GROUP:
            return self._volume_unmask_group(access_group, volume)
        return self._volume_unmask_old(access_group, volume)

    def _volume_unmask_old(self, access_group, volume):
        # Legacy (non group-masking) path: hide the volume via
        # ControllerConfigurationService.HidePaths().
        cim_ccs = self._c.cim_ccs_of_sys_id(volume.system_id)
        cim_spc_path = smis_ag.lsm_ag_to_cim_spc_path(self._c, access_group)
        # Pre-check: not masked
        cim_vol = self._cim_vol_masked_to_spc(
            cim_spc_path, volume.id, ['Name'])
        if cim_vol is None:
            raise LsmError(
                ErrorNumber.NO_STATE_CHANGE,
                "Volume is not masked to requested access group")
        hide_params = {'LUNames': [cim_vol['Name']],
                       'ProtocolControllers': [cim_spc_path]}
        self._c.invoke_method_wait('HidePaths', cim_ccs.path, hide_params)
        return None

    def _is_access_group(self, cim_spc):
        # On NetApp-E every SPC is treated as an access group.
        if self._c.is_netappe():
            return True
        rc = True
        _SMIS_EMC_ADAPTER_ROLE_MASKING = 'MASK_VIEW'
        if 'EMCAdapterRole' in cim_spc:
            # Currently SNIA does not define LUN mapping.
            # EMC is using their specific way for LUN mapping which
            # expose their frontend ports as a SPC(SCSIProtocolController).
            # which we shall filter out.
            emc_adp_roles = cim_spc['EMCAdapterRole'].split(' ')
            if _SMIS_EMC_ADAPTER_ROLE_MASKING not in emc_adp_roles:
                rc = False
        return rc

    def _cim_spc_of(self, system_id, property_list=None):
        """
        Return a list of CIM_SCSIProtocolController.
        Following SNIA SMIS 'Masking and Mapping Profile':
            CIM_ControllerConfigurationService
                    |
                    | CIM_ConcreteDependency
                    v
            CIM_SCSIProtocolController
        """
        cim_ccs = None
        rc_cim_spcs = []
        if property_list is None:
            property_list = []
        try:
            cim_ccs = self._c.cim_ccs_of_sys_id(system_id, raise_error=False)
        except CIMError as ce:
            # Invalid class/parameter means the provider simply lacks the
            # masking and mapping service.
            error_code = tuple(ce)[0]
            if error_code == pywbem.CIM_ERR_INVALID_CLASS or \
               error_code == pywbem.CIM_ERR_INVALID_PARAMETER:
                raise LsmError(ErrorNumber.NO_SUPPORT,
                               'AccessGroup is not supported ' +
                               'by this array')
        if cim_ccs is None:
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           'AccessGroup is not supported by this array')
        cim_spcs = self._c.Associators(
            cim_ccs.path,
            AssocClass='CIM_ConcreteDependency',
            ResultClass='CIM_SCSIProtocolController',
            PropertyList=property_list)
        for cim_spc in cim_spcs:
            # Filter out SPCs which do not represent access groups
            # (e.g. EMC frontend-port SPCs).
            if self._is_access_group(cim_spc):
                rc_cim_spcs.append(cim_spc)
        return rc_cim_spcs

    @handle_cim_errors
    def volumes_accessible_by_access_group(self, access_group, flags=0):
        mask_type = smis_cap.mask_type(self._c, raise_error=True)
        cim_vols = []
        cim_vol_pros = smis_vol.cim_vol_pros()
        # Workaround for EMC VNX/CX
        if mask_type == smis_cap.MASK_TYPE_GROUP:
            cim_sys = smis_sys.cim_sys_of_sys_id(
                self._c, access_group.system_id)
            if cim_sys.path.classname == 'Clar_StorageSystem':
                mask_type = smis_cap.MASK_TYPE_MASK
        if mask_type == smis_cap.MASK_TYPE_GROUP:
            cim_init_mg_path = smis_ag.lsm_ag_to_cim_init_mg_path(
                self._c, access_group)
            cim_spcs_path = self._c.AssociatorNames(
                cim_init_mg_path,
                AssocClass='CIM_AssociatedInitiatorMaskingGroup',
                ResultClass='CIM_SCSIProtocolController')
            for cim_spc_path in cim_spcs_path:
                cim_vols.extend(
                    smis_ag.cim_vols_masked_to_cim_spc_path(
                        self._c, cim_spc_path, cim_vol_pros))
        else:
            cim_spc_path = smis_ag.lsm_ag_to_cim_spc_path(
                self._c, access_group)
            cim_vols = smis_ag.cim_vols_masked_to_cim_spc_path(
                self._c, cim_spc_path, cim_vol_pros)
        rc = []
        for cim_vol in cim_vols:
            pool_id = smis_pool.pool_id_of_cim_vol(self._c, cim_vol.path)
            sys_id = smis_sys.sys_id_of_cim_vol(cim_vol)
            rc.append(
                smis_vol.cim_vol_to_lsm_vol(cim_vol, pool_id, sys_id))
        return rc

    @handle_cim_errors
    def access_groups_granted_to_volume(self, volume, flags=0):
        rc = []
        mask_type = smis_cap.mask_type(self._c, raise_error=True)
        cim_vol_path = smis_vol.lsm_vol_to_cim_vol_path(self._c, volume)
        # Workaround for EMC VNX/CX
        if mask_type == smis_cap.MASK_TYPE_GROUP:
            cim_sys = smis_sys.cim_sys_of_sys_id(self._c, volume.system_id)
            if cim_sys.path.classname == 'Clar_StorageSystem':
                mask_type = smis_cap.MASK_TYPE_MASK
        cim_spc_pros = None
        if mask_type == smis_cap.MASK_TYPE_GROUP:
            # SPC properties are not needed for the group path; we only
            # walk from SPC to CIM_InitiatorMaskingGroup.
            cim_spc_pros = []
        else:
            cim_spc_pros = smis_ag.cim_spc_pros()
        cim_spcs = self._c.Associators(
            cim_vol_path,
            AssocClass='CIM_ProtocolControllerForUnit',
            ResultClass='CIM_SCSIProtocolController',
            PropertyList=cim_spc_pros)
        if mask_type == smis_cap.MASK_TYPE_GROUP:
            cim_init_mg_pros = smis_ag.cim_init_mg_pros()
            for cim_spc in cim_spcs:
                cim_init_mgs = self._c.Associators(
                    cim_spc.path,
                    AssocClass='CIM_AssociatedInitiatorMaskingGroup',
                    ResultClass='CIM_InitiatorMaskingGroup',
                    PropertyList=cim_init_mg_pros)
                rc.extend(
                    list(
                        smis_ag.cim_init_mg_to_lsm_ag(
                            self._c, x, volume.system_id)
                        for x in cim_init_mgs))
        else:
            for cim_spc in cim_spcs:
                if self._is_access_group(cim_spc):
                    rc.append(
                        smis_ag.cim_spc_to_lsm_ag(
                            self._c, cim_spc, volume.system_id))
        return rc

    def _cim_init_mg_of(self, system_id, property_list=None):
        """
        We use this association to get all CIM_InitiatorMaskingGroup:
            CIM_GroupMaskingMappingService
                    |
                    | CIM_ServiceAffectsElement
                    v
            CIM_InitiatorMaskingGroup
        """
        if property_list is None:
            property_list = []
        cim_gmms = self._c.cim_gmms_of_sys_id(system_id)
        return self._c.Associators(
            cim_gmms.path,
            AssocClass='CIM_ServiceAffectsElement',
            ResultClass='CIM_InitiatorMaskingGroup',
            PropertyList=property_list)

    @handle_cim_errors
    def access_groups(self, search_key=None, search_value=None, flags=0):
        rc = []
        mask_type = smis_cap.mask_type(self._c, raise_error=True)
        cim_sys_pros = smis_sys.cim_sys_id_pros()
        cim_syss = smis_sys.root_cim_sys(self._c, cim_sys_pros)
        cim_spc_pros = smis_ag.cim_spc_pros()
        for cim_sys in cim_syss:
            if cim_sys.path.classname == 'Clar_StorageSystem':
                # Workaround for EMC VNX/CX.
                # Even they claim support of Group M&M via
                # CIM_RegisteredProfile, but actually they don't support it.
                mask_type = smis_cap.MASK_TYPE_MASK
            system_id = smis_sys.sys_id_of_cim_sys(cim_sys)
            if mask_type == smis_cap.MASK_TYPE_GROUP:
                cim_init_mg_pros = smis_ag.cim_init_mg_pros()
                cim_init_mgs = self._cim_init_mg_of(
                    system_id, cim_init_mg_pros)
                rc.extend(
                    list(
                        smis_ag.cim_init_mg_to_lsm_ag(self._c, x, system_id)
                        for x in cim_init_mgs))
            elif mask_type == smis_cap.MASK_TYPE_MASK:
                cim_spcs = self._cim_spc_of(system_id, cim_spc_pros)
                rc.extend(
                    list(
                        smis_ag.cim_spc_to_lsm_ag(self._c, cim_spc, system_id)
                        for cim_spc in cim_spcs))
            else:
                raise LsmError(ErrorNumber.PLUGIN_BUG,
                               "_get_cim_spc_by_id(): Got invalid mask_type: "
                               "%s" % mask_type)
        return search_property(rc, search_key, search_value)

    def _ag_init_add_group(self, access_group, init_id, init_type):
        # Group-masking path: add the initiator to the existing
        # CIM_InitiatorMaskingGroup via AddMembers().
        cim_sys = smis_sys.cim_sys_of_sys_id(self._c, access_group.system_id)
        if cim_sys.path.classname == 'Clar_StorageSystem':
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "EMC VNX/CX require WWNN defined when adding a "
                           "new initiator which is not supported by LSM yet. "
                           "Please do it via EMC vendor specific tools.")
        cim_init_mg_path = smis_ag.lsm_ag_to_cim_init_mg_path(
            self._c, access_group)
        exist_cim_inits = smis_ag.cim_init_of_cim_init_mg_path(
            self._c, cim_init_mg_path)
        # Check whether already added.
        for exist_cim_init in exist_cim_inits:
            if smis_ag.init_id_of_cim_init(exist_cim_init) == init_id:
                return copy.deepcopy(access_group)
        cim_init_path = smis_ag.cim_init_path_check_or_create(
            self._c, access_group.system_id, init_id, init_type)
        cim_gmms = self._c.cim_gmms_of_sys_id(access_group.system_id)
        in_params = {
            'MaskingGroup': cim_init_mg_path,
            'Members': [cim_init_path],
        }
        new_cim_init_mg_path = self._c.invoke_method_wait(
            'AddMembers', cim_gmms.path, in_params,
            out_key='MaskingGroup',
            expect_class='CIM_InitiatorMaskingGroup')
        cim_init_mg_pros = smis_ag.cim_init_mg_pros()
        new_cim_init_mg = self._c.GetInstance(
            new_cim_init_mg_path, PropertyList=cim_init_mg_pros,
            LocalOnly=False)
        return smis_ag.cim_init_mg_to_lsm_ag(
            self._c, new_cim_init_mg, access_group.system_id)

    @handle_cim_errors
    def access_group_initiator_add(self, access_group, init_id, init_type,
                                   flags=0):
        init_id = smis_ag.lsm_init_id_to_snia(init_id)
        mask_type = smis_cap.mask_type(self._c, raise_error=True)
        if mask_type == smis_cap.MASK_TYPE_GROUP:
            return self._ag_init_add_group(access_group, init_id, init_type)
        else:
            return self._ag_init_add_old(access_group, init_id, init_type)

    def _ag_init_add_old(self, access_group, init_id, init_type):
        # CIM_StorageHardwareIDManagementService.CreateStorageHardwareID()
        # is mandatory since 1.4rev6
        cim_sys = smis_sys.cim_sys_of_sys_id(self._c, access_group.system_id)
        if cim_sys.path.classname == 'Clar_StorageSystem':
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "EMC VNX/CX require WWNN defined when adding "
                           "new initiator which is not supported by LSM yet. "
                           "Please do it via EMC vendor specific tools. "
                           "EMC VNX does not support adding iSCSI IQN neither")
        cim_spc_path = smis_ag.lsm_ag_to_cim_spc_path(
            self._c, access_group)
        exist_cim_inits = smis_ag.cim_init_of_cim_spc_path(
            self._c, cim_spc_path)
        # No-op if the initiator is already in the group.
        for exist_cim_init in exist_cim_inits:
            if smis_ag.init_id_of_cim_init(exist_cim_init) == init_id:
                return copy.deepcopy(access_group)
        # Check to see if we have this initiator already, if not we
        # create it and then add to the view.
        smis_ag.cim_init_path_check_or_create(
            self._c, access_group.system_id, init_id, init_type)
        cim_ccs = self._c.cim_ccs_of_sys_id(access_group.system_id)
        in_params = {'InitiatorPortIDs': [init_id],
                     'ProtocolControllers': [cim_spc_path]}
        cim_spc_path = self._c.invoke_method_wait(
            'ExposePaths', cim_ccs.path, in_params,
            out_key='ProtocolControllers',
            flag_out_array=True,
            expect_class='CIM_SCSIProtocolController')
        cim_spc_pros = smis_ag.cim_spc_pros()
        cim_spc = self._c.GetInstance(
            cim_spc_path, PropertyList=cim_spc_pros, LocalOnly=False)
        return smis_ag.cim_spc_to_lsm_ag(
            self._c, cim_spc, access_group.system_id)

    def _ag_init_del_group(self, access_group, init_id):
        """
        Call CIM_GroupMaskingMappingService.RemoveMembers() against
        CIM_InitiatorMaskingGroup.
        """
        cim_init_mg_path = smis_ag.lsm_ag_to_cim_init_mg_path(
            self._c, access_group)
        cur_cim_inits = smis_ag.cim_init_of_cim_init_mg_path(
            self._c, cim_init_mg_path)
        # Locate the initiator to remove within the group.
        cim_init = None
        for cur_cim_init in cur_cim_inits:
            if smis_ag.init_id_of_cim_init(cur_cim_init) == init_id:
                cim_init = cur_cim_init
                break
        if cim_init is None:
            raise LsmError(ErrorNumber.NO_STATE_CHANGE,
                           "Initiator %s does not exist in defined "
                           "access group %s" % (init_id, access_group.id))
        if len(cur_cim_inits) == 1:
            raise LsmError(ErrorNumber.LAST_INIT_IN_ACCESS_GROUP,
                           "Refuse to remove last initiator from access group")
        cim_gmms = self._c.cim_gmms_of_sys_id(access_group.system_id)
        # RemoveMembers from InitiatorMaskingGroup
        in_params = {
            'MaskingGroup': cim_init_mg_path,
            'Members': [cim_init.path],
        }
        self._c.invoke_method_wait('RemoveMembers', cim_gmms.path, in_params)
        # Re-read the group and convert it back to an LSM AccessGroup.
        cim_init_mg_pros = smis_ag.cim_init_mg_pros()
        cim_init_mg = self._c.GetInstance(
            cim_init_mg_path, PropertyList=cim_init_mg_pros)
        return smis_ag.cim_init_mg_to_lsm_ag(
            self._c, cim_init_mg, access_group.system_id)

    @handle_cim_errors
    def access_group_initiator_delete(self, access_group, init_id, init_type,
                                      flags=0):
        if self._c.is_netappe():
            # When using HidePaths to remove initiator, the whole SPC will be
            # removed. Before we find a workaround for this, I would like to
            # have this method disabled as NO_SUPPORT.
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "SMI-S plugin does not support "
                           "access_group_initiator_delete() against NetApp-E")
        init_id = smis_ag.lsm_init_id_to_snia(init_id)
        mask_type = smis_cap.mask_type(self._c, raise_error=True)
        if mask_type == smis_cap.MASK_TYPE_GROUP:
            return self._ag_init_del_group(access_group, init_id)
        else:
            return self._ag_init_del_old(access_group, init_id)

    def _ag_init_del_old(self, access_group, init_id):
        # Legacy masking: HidePaths removes the initiator from the SPC.
        cim_spc_path = smis_ag.lsm_ag_to_cim_spc_path(self._c, access_group)
        cim_ccs = self._c.cim_ccs_of_sys_id(access_group.system_id)
        hide_params = {'InitiatorPortIDs': [init_id],
                       'ProtocolControllers': [cim_spc_path]}
        self._c.invoke_method_wait('HidePaths', cim_ccs.path, hide_params)
        return None

    @handle_cim_errors
    def job_free(self, job_id, flags=0):
        """
        Frees the resources given a job number.
        """
        cim_job = self._c.cim_job_of_job_id(job_id, ['DeleteOnCompletion'])
        # See if we should delete the job
        if not cim_job['DeleteOnCompletion']:
            try:
                self._c.DeleteInstance(cim_job.path)
            except CIMError:
                # Best-effort cleanup; the provider may remove it itself.
                pass

    @handle_cim_errors
    def disks(self, search_key=None, search_value=None, flags=0):
        """
        return all object of data.Disk.
        We are using "Disk Drive Lite Subprofile" v1.4 of SNIA SMI-S for
        these classes to create LSM Disk:
            CIM_DiskDrive
            CIM_StorageExtent (Primordial)
        Due to 'Multiple Computer System' profile, disks might associated
        to sub ComputerSystem. To improve performance of listing disks,
        we will use EnumerateInstances(). Which means we have to filter
        the results by ourselves in case URI contain 'system=xxx'.
        """
        rc = []
        self._c.profile_check(SmisCommon.SNIA_DISK_LITE_PROFILE,
                              SmisCommon.SMIS_SPEC_VER_1_4,
                              raise_error=True)
        cim_disk_pros = smis_disk.cim_disk_pros()
        cim_disks = self._c.EnumerateInstances(
            'CIM_DiskDrive', PropertyList=cim_disk_pros)
        for cim_disk in cim_disks:
            # Honor 'system=xxx' filtering from the URI.
            if self._c.system_list and \
               smis_disk.sys_id_of_cim_disk(cim_disk) not in \
               self._c.system_list:
                continue
            rc.extend([smis_disk.cim_disk_to_lsm_disk(self._c, cim_disk)])
        return search_property(rc, search_key, search_value)

    @staticmethod
    def _is_frontend_fc_tgt(cim_fc_tgt):
        """
        Check CIM_FCPort['UsageRestriction'] for frontend port.
        """
        dmtf_usage = cim_fc_tgt['UsageRestriction']
        if dmtf_usage == dmtf.TGT_PORT_USAGE_FRONTEND_ONLY or \
           dmtf_usage == dmtf.TGT_PORT_USAGE_UNRESTRICTED:
            return True
        return False

    def _cim_fc_tgt_of(self, cim_sys_path, property_list=None):
        """
        Get all CIM_FCPort (frontend only) from CIM_ComputerSystem and
        its leaf CIM_ComputerSystem
        """
        rc = []
        if property_list is None:
            property_list = ['UsageRestriction']
        else:
            # 'UsageRestriction' is required for the frontend filter below.
            property_list = merge_list(property_list, ['UsageRestriction'])
        all_cim_syss_path = [cim_sys_path]
        if smis_cap.multi_sys_is_supported(self._c):
            all_cim_syss_path.extend(
                self._leaf_cim_syss_path_of(cim_sys_path))
        for cur_cim_sys_path in all_cim_syss_path:
            cur_cim_fc_tgts = self._c.Associators(
                cur_cim_sys_path,
                AssocClass='CIM_SystemDevice',
                ResultClass='CIM_FCPort',
                PropertyList=property_list)
            for cim_fc_tgt in cur_cim_fc_tgts:
                if Smis._is_frontend_fc_tgt(cim_fc_tgt):
                    rc.extend([cim_fc_tgt])
        return rc

    @staticmethod
    def _cim_fc_tgt_to_lsm(cim_fc_tgt, system_id):
        """
        Convert CIM_FCPort to Lsm.TargetPort
        """
        port_id = md5(cim_fc_tgt['DeviceID'])
        port_type = _lsm_tgt_port_type_of_cim_fc_tgt(cim_fc_tgt)
        # SNIA define WWPN string as upper, no splitter, 16 digits.
        # No need to check.
        wwpn = hex_string_format(cim_fc_tgt['PermanentAddress'], 16, 2)
        port_name = cim_fc_tgt['ElementName']
        plugin_data = None
        return TargetPort(port_id, port_type, wwpn, wwpn, wwpn, port_name,
                          system_id, plugin_data)

    def _iscsi_node_names_of(self, cim_iscsi_pg_path):
        """
            CIM_iSCSIProtocolEndpoint
                    |
                    |
                    v
            CIM_SAPAvailableForElement
                    |
                    |
                    v
            CIM_SCSIProtocolController  # iSCSI Node
        """
        cim_spcs = self._c.Associators(
            cim_iscsi_pg_path,
            ResultClass='CIM_SCSIProtocolController',
            AssocClass='CIM_SAPAvailableForElement',
            PropertyList=['Name', 'NameFormat'])
        cim_iscsi_nodes = []
        for cim_spc in cim_spcs:
            if cim_spc.classname == 'Clar_MappingSCSIProtocolController':
                # EMC has vendor specific class which contain identical
                # properties of SPC for iSCSI node.
                continue
            if cim_spc['NameFormat'] == dmtf.SPC_NAME_FORMAT_ISCSI:
                cim_iscsi_nodes.extend([cim_spc])
        if len(cim_iscsi_nodes) == 0:
            return []
        return [n['Name'] for n in cim_iscsi_nodes]

    def _cim_iscsi_pg_of(self, cim_sys_path, property_list=None):
        """
        Get all CIM_iSCSIProtocolEndpoint(Target only) from
        CIM_ComputerSystem and its leaf CIM_ComputerSystem
        """
        rc = []
        if property_list is None:
            property_list = ['Role']
        else:
            # 'Role' is required for the target-only filter below.
            property_list = merge_list(property_list, ['Role'])
        all_cim_syss_path = [cim_sys_path]
        if smis_cap.multi_sys_is_supported(self._c):
            all_cim_syss_path.extend(
                self._leaf_cim_syss_path_of(cim_sys_path))
        for cur_cim_sys_path in all_cim_syss_path:
            cur_cim_iscsi_pgs = self._c.Associators(
                cur_cim_sys_path,
                AssocClass='CIM_HostedAccessPoint',
                ResultClass='CIM_iSCSIProtocolEndpoint',
                PropertyList=property_list)
            for cim_iscsi_pg in cur_cim_iscsi_pgs:
                if cim_iscsi_pg['Role'] == dmtf.ISCSI_TGT_ROLE_TARGET:
                    rc.extend([cim_iscsi_pg])
        return rc

    def _cim_iscsi_pg_to_lsm(self, cim_iscsi_pg, system_id):
        """
        Return a list of TargetPort CIM_iSCSIProtocolEndpoint
        Associations:
            CIM_SCSIProtocolController  # iSCSI Node
                    ^
                    |   CIM_SAPAvailableForElement
                    |
            CIM_iSCSIProtocolEndpoint  # iSCSI Portal Group
                    |
                    |   CIM_BindsTo
                    v
            CIM_TCPProtocolEndpoint  # Need TCP port, default is 3260
                    |
                    |   CIM_BindsTo
                    v
            CIM_IPProtocolEndpoint  # Need IPv4 and IPv6 address
                    |
                    |   CIM_DeviceSAPImplementation
                    v
            CIM_EthernetPort  # Need MAC address (Optional)
        Assuming there is storage array support iSER (iSCSI over RDMA of
        Infinity Band), this method is only for iSCSI over TCP.
        """
        rc = []
        port_type = TargetPort.TYPE_ISCSI
        plugin_data = None
        cim_tcps = self._c.Associators(
            cim_iscsi_pg.path,
            ResultClass='CIM_TCPProtocolEndpoint',
            AssocClass='CIM_BindsTo',
            PropertyList=['PortNumber'])
        if len(cim_tcps) == 0:
            raise LsmError(ErrorNumber.PLUGIN_BUG,
                           "_cim_iscsi_pg_to_lsm(): "
                           "No CIM_TCPProtocolEndpoint associated to %s"
                           % cim_iscsi_pg.path)
        iscsi_node_names = self._iscsi_node_names_of(cim_iscsi_pg.path)
        if len(iscsi_node_names) == 0:
            return []
        for cim_tcp in cim_tcps:
            tcp_port = cim_tcp['PortNumber']
            cim_ips = self._c.Associators(
                cim_tcp.path,
                ResultClass='CIM_IPProtocolEndpoint',
                AssocClass='CIM_BindsTo',
                PropertyList=['IPv4Address', 'IPv6Address', 'SystemName',
                              'EMCPortNumber', 'IPv6AddressType'])
            for cim_ip in cim_ips:
                ipv4_addr = ''
                ipv6_addr = ''
                # 'IPv4Address', 'IPv6Address' are optional in SMI-S 1.4.
                if 'IPv4Address' in cim_ip and cim_ip['IPv4Address']:
                    ipv4_addr = cim_ip['IPv4Address']
                if 'IPv6Address' in cim_ip and cim_ip['IPv6Address']:
                    ipv6_addr = cim_ip['IPv6Address']
                # 'IPv6AddressType' is not listed in SMI-S but in DMTF CIM
                # Schema
                # Only allow IPv6 Global Unicast Address, 6to4, and Unique
                # Local Address.
                if 'IPv6AddressType' in cim_ip and cim_ip['IPv6AddressType']:
                    ipv6_addr_type = cim_ip['IPv6AddressType']
                    if ipv6_addr_type != dmtf.IPV6_ADDR_TYPE_GUA and \
                       ipv6_addr_type != dmtf.IPV6_ADDR_TYPE_6TO4 and \
                       ipv6_addr_type != dmtf.IPV6_ADDR_TYPE_ULA:
                        ipv6_addr = ''
                # NetApp is using this kind of IPv6 address
                #   0000:0000:0000:0000:0000:0000:0a10:29d5
                # even when IPv6 is not enabled on their array.
                # It's not a legal IPv6 address anyway. No need to do
                # vendor check.
if ipv6_addr[0:29] == '0000:0000:0000:0000:0000:0000': ipv6_addr = '' if ipv4_addr is None and ipv6_addr is None: continue cim_eths = self._c.Associators( cim_ip.path, ResultClass='CIM_EthernetPort', AssocClass='CIM_DeviceSAPImplementation', PropertyList=['PermanentAddress', 'ElementName']) nics = [] # NetApp ONTAP cluster-mode show one IP bonded to multiple # ethernet, # Not sure it's their BUG or real ethernet channel bonding. # Waiting reply. if len(cim_eths) == 0: nics = [('', '')] else: for cim_eth in cim_eths: mac_addr = '' port_name = '' if 'PermanentAddress' in cim_eth and \ cim_eth["PermanentAddress"]: mac_addr = cim_eth["PermanentAddress"] # 'ElementName' is optional in CIM_EthernetPort if 'ElementName' in cim_eth and cim_eth["ElementName"]: port_name = cim_eth['ElementName'] nics.extend([(mac_addr, port_name)]) for nic in nics: mac_address = nic[0] port_name = nic[1] if mac_address: # Convert to lsm require form mac_address = hex_string_format(mac_address, 12, 2) if ipv4_addr: network_address = "%s:%s" % (ipv4_addr, tcp_port) rc.extend( [TargetPort( md5( "%s:%s:%s" % ( mac_address, network_address, iscsi_node_name)), port_type, iscsi_node_name, network_address, mac_address, port_name, system_id, plugin_data) for iscsi_node_name in iscsi_node_names]) if ipv6_addr: # DMTF or SNIA did defined the IPv6 string format. # we just guess here. 
                        if len(ipv6_addr) == 39:
                            # Full colon-separated form: strip separators.
                            ipv6_addr = ipv6_addr.replace(':', '')
                        if len(ipv6_addr) == 32:
                            # Bare 32-hex-digit form: re-insert separators.
                            ipv6_addr = hex_string_format(
                                ipv6_addr, 32, 4)
                        network_address = "[%s]:%s" % (ipv6_addr, tcp_port)
                        rc.extend([
                            TargetPort(
                                md5(
                                    "%s:%s:%s" % (
                                        mac_address,
                                        network_address,
                                        iscsi_node_name)),
                                port_type, iscsi_node_name,
                                network_address, mac_address, port_name,
                                system_id, plugin_data)
                            for iscsi_node_name in iscsi_node_names])
        return rc

    def _leaf_cim_syss_path_of(self, cim_sys_path):
        """
        Return a list of CIMInstanceName of leaf CIM_ComputerSystem
        """
        # NOTE(review): max_loop_count and loop_counter appear unused in
        # this method; recursion depth is not actually bounded by them.
        max_loop_count = 10     # There is no storage array need 10 layer of
                                # Computer
        loop_counter = max_loop_count
        rc = []
        leaf_cim_syss_path = []
        try:
            leaf_cim_syss_path = self._c.AssociatorNames(
                cim_sys_path,
                ResultClass='CIM_ComputerSystem',
                AssocClass='CIM_ComponentCS',
                Role='GroupComponent',
                ResultRole='PartComponent')
        except CIMError as ce:
            error_code = tuple(ce)[0]
            if error_code == pywbem.CIM_ERR_INVALID_CLASS or \
               error_code == pywbem.CIM_ERR_NOT_SUPPORTED:
                return []
            # NOTE(review): other CIM errors fall through here and are
            # silently treated as "no leaf systems" — confirm intended.
        if len(leaf_cim_syss_path) > 0:
            rc = leaf_cim_syss_path
            # Recurse so grand-children systems are included as well.
            for cim_sys_path in leaf_cim_syss_path:
                rc.extend(self._leaf_cim_syss_path_of(cim_sys_path))
        return rc

    @handle_cim_errors
    def target_ports(self, search_key=None, search_value=None, flags=0):
        rc = []
        cim_fc_tgt_pros = ['UsageRestriction', 'ElementName', 'SystemName',
                           'PermanentAddress', 'PortDiscriminator',
                           'LinkTechnology', 'DeviceID']
        cim_syss = smis_sys.root_cim_sys(
            self._c, property_list=smis_sys.cim_sys_id_pros())
        for cim_sys in cim_syss:
            system_id = smis_sys.sys_id_of_cim_sys(cim_sys)
            flag_fc_support = smis_cap.fc_tgt_is_supported(self._c)
            flag_iscsi_support = smis_cap.iscsi_tgt_is_supported(self._c)
            # Assuming: if one system does not support target_ports(),
            # all systems from the same provider will not support
            # target_ports().
            if flag_fc_support is False and flag_iscsi_support is False:
                # NOTE(review): missing space between "of" and "these" in
                # this message (runtime string left untouched here).
                raise LsmError(ErrorNumber.NO_SUPPORT,
                               "Target SMI-S provider does not support any of"
                               "these profiles: '%s %s', '%s %s'"
                               % (SmisCommon.SMIS_SPEC_VER_1_4,
                                  SmisCommon.SNIA_FC_TGT_PORT_PROFILE,
                                  SmisCommon.SMIS_SPEC_VER_1_1,
                                  SmisCommon.SNIA_ISCSI_TGT_PORT_PROFILE))
            if flag_fc_support:
                # CIM_FCPort might be not belong to root cim_sys
                # In that case, CIM_FCPort['SystemName'] will not be
                # the name of root CIM_ComputerSystem.
                cim_fc_tgt_pros = ['UsageRestriction', 'ElementName',
                                   'SystemName', 'PermanentAddress',
                                   'PortDiscriminator', 'LinkTechnology',
                                   'DeviceID']
                cim_fc_tgts = self._cim_fc_tgt_of(cim_sys.path,
                                                  cim_fc_tgt_pros)
                rc.extend(
                    list(
                        Smis._cim_fc_tgt_to_lsm(x, system_id)
                        for x in cim_fc_tgts))
            if flag_iscsi_support:
                cim_iscsi_pgs = self._cim_iscsi_pg_of(cim_sys.path)
                for cim_iscsi_pg in cim_iscsi_pgs:
                    rc.extend(
                        self._cim_iscsi_pg_to_lsm(cim_iscsi_pg, system_id))
        # NetApp is sharing CIM_TCPProtocolEndpoint which
        # cause duplicate TargetPort. It's a long story, they heard my
        # bug report.
        if len(cim_syss) >= 1 and \
           cim_syss[0].classname == 'ONTAP_StorageSystem':
            id_list = []
            new_rc = []
            # We keep the original list order by not using dict.values()
            for lsm_tp in rc:
                if lsm_tp.id not in id_list:
                    id_list.extend([lsm_tp.id])
                    new_rc.extend([lsm_tp])
            rc = new_rc
        return search_property(rc, search_key, search_value)

    def _cim_pep_path_of_fc_tgt(self, cim_fc_tgt_path):
        """
        Return CIMInstanceName of CIM_SCSIProtocolEndpoint of CIM_FCPort
        In 1.4r6, it's one-to-one map.
        """
        return self._c.AssociatorNames(
            cim_fc_tgt_path,
            AssocClass='CIM_DeviceSAPImplementation',
            ResultClass='CIM_SCSIProtocolEndpoint')[0]

    def _check_exist_cim_tgt_mg(self, name):
        """
        We should do more checks[1] in stead of use it directly.
        But considering EMC VMAX is the only support vendor, make it quick
        and works could be priority 1.
        We can improve this for any bug report.
        [1] At least check whether CIM_TargetMaskingGroup is already used
            by other SPC.
        """
        cim_tgt_mgs = self._c.EnumerateInstances(
            'CIM_TargetMaskingGroup',
            PropertyList=['ElementName'])
        for cim_tgt_mg in cim_tgt_mgs:
            if cim_tgt_mg['ElementName'] == name:
                return cim_tgt_mg.path
        return None

    def _check_exist_cim_dev_mg(self, name, cim_gmms_path, cim_vol_path,
                                vol_id):
        """
        This is buggy check, but it works on EMC VMAX which is only
        supported platform of Group Masking and Mapping.
        When found CIM_DeviceMaskingGroup, make sure cim_vol is included.
        """
        cim_dev_mgs = self._c.EnumerateInstances(
            'CIM_DeviceMaskingGroup',
            PropertyList=['ElementName'])
        cim_dev_mg = None
        for tmp_cim_dev_mg in cim_dev_mgs:
            if tmp_cim_dev_mg['ElementName'] == name:
                cim_dev_mg = tmp_cim_dev_mg
                break
        if cim_dev_mg:
            # Check whether cim_vol included.
            cim_vol_pros = smis_vol.cim_vol_id_pros()
            cim_vols = self._c.Associators(
                cim_dev_mg.path,
                AssocClass='CIM_OrderedMemberOfCollection',
                ResultClass='CIM_StorageVolume',
                PropertyList=cim_vol_pros)
            for cim_vol in cim_vols:
                if smis_vol.vol_id_of_cim_vol(cim_vol) == vol_id:
                    return cim_dev_mg.path
            # We should add this volume to found DeviceMaskingGroup
            in_params = {
                'MaskingGroup': cim_dev_mg.path,
                'Members': [cim_vol_path],
            }
            self._c.invoke_method_wait('AddMembers', cim_gmms_path, in_params)
            return cim_dev_mg.path
        return None

    @handle_cim_errors
    def access_group_create(self, name, init_id, init_type, system,
                            flags=0):
        """
        Using 1.5.0 'Group Masking and Mapping' profile.
        Actually, only EMC VMAX/DMX support this now(July 2014).
        Steps:
            0. Check exist SPC of init_id for duplication call and
               conflict.
            1. Create CIM_InitiatorMaskingGroup
        """
        org_init_id = init_id
        init_id = smis_ag.lsm_init_id_to_snia(init_id)
        self._c.profile_check(SmisCommon.SNIA_GROUP_MASK_PROFILE,
                              SmisCommon.SMIS_SPEC_VER_1_5,
                              raise_error=True)
        if init_type != AccessGroup.INIT_TYPE_WWPN and \
           init_type != AccessGroup.INIT_TYPE_ISCSI_IQN:
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "SMI-S plugin only support creating FC/FCoE WWPN "
                           "and iSCSI AccessGroup")
        cim_sys = smis_sys.cim_sys_of_sys_id(self._c, system.id)
        if cim_sys.path.classname == 'Clar_StorageSystem':
            # EMC VNX/CX does not support Group M&M, which incorrectly
            # exposed in CIM_RegisteredProfile
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "access_group_create() is not supported by "
                           "EMC VNX/CX which lacks the support of SNIA 1.5+ "
                           "Group Masking and Mapping profile")
        flag_fc_support = smis_cap.fc_tgt_is_supported(self._c)
        flag_iscsi_support = smis_cap.iscsi_tgt_is_supported(self._c)
        if init_type == AccessGroup.INIT_TYPE_WWPN and not flag_fc_support:
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "Target SMI-S provider does not support "
                           "FC target port, which not allow creating "
                           "WWPN access group")
        if init_type == AccessGroup.INIT_TYPE_ISCSI_IQN and \
           not flag_iscsi_support:
            raise LsmError(ErrorNumber.NO_SUPPORT,
                           "Target SMI-S provider does not support "
                           "iSCSI target port, which not allow creating "
                           "iSCSI IQN access group")
        cim_init_path = smis_ag.cim_init_path_check_or_create(
            self._c, system.id, init_id, init_type)
        # Create CIM_InitiatorMaskingGroup
        cim_gmms = self._c.cim_gmms_of_sys_id(system.id)
        in_params = {'GroupName': name,
                     'Members': [cim_init_path],
                     'Type': dmtf.MASK_GROUP_TYPE_INIT}
        cim_init_mg_pros = smis_ag.cim_init_mg_pros()
        try:
            cim_init_mg_path = self._c.invoke_method_wait(
                'CreateGroup', cim_gmms.path, in_params,
                out_key='MaskingGroup',
                expect_class='CIM_InitiatorMaskingGroup')
        except (LsmError, CIMError):
            # Check possible failure
            # 1. Initiator already exist in other group.
exist_cim_init_mg_paths = self._c.AssociatorNames( cim_init_path, AssocClass='CIM_MemberOfCollection', ResultClass='CIM_InitiatorMaskingGroup') if len(exist_cim_init_mg_paths) != 0: raise LsmError(ErrorNumber.EXISTS_INITIATOR, "Initiator %s " % org_init_id + "already exist in other access group") # 2. Requested name used by other group. exist_cim_init_mgs = self._cim_init_mg_of( system.id, property_list=['ElementName']) for exist_cim_init_mg in exist_cim_init_mgs: if exist_cim_init_mg['ElementName'] == name: raise LsmError(ErrorNumber.NAME_CONFLICT, "Requested name %s is used by " % name + "another access group") raise cim_init_mg = self._c.GetInstance( cim_init_mg_path, PropertyList=cim_init_mg_pros) return smis_ag.cim_init_mg_to_lsm_ag(self._c, cim_init_mg, system.id) @handle_cim_errors def access_group_delete(self, access_group, flags=0): self._c.profile_check( SmisCommon.SNIA_GROUP_MASK_PROFILE, SmisCommon.SMIS_SPEC_VER_1_5, raise_error=True) cim_init_mg_path = smis_ag.lsm_ag_to_cim_init_mg_path( self._c, access_group) # Check whether still have volume masked. cim_spcs_path = self._c.AssociatorNames( cim_init_mg_path, AssocClass='CIM_AssociatedInitiatorMaskingGroup', ResultClass='CIM_SCSIProtocolController') for cim_spc_path in cim_spcs_path: if len(self._c.AssociatorNames( cim_spc_path, AssocClass='CIM_ProtocolControllerForUnit', ResultClass='CIM_StorageVolume')) >= 1: raise LsmError(ErrorNumber.IS_MASKED, "Access Group %s has volume masked" % access_group.id) cim_gmms = self._c.cim_gmms_of_sys_id(access_group.system_id) in_params = { 'MaskingGroup': cim_init_mg_path, 'Force': True, } self._c.invoke_method_wait('DeleteGroup', cim_gmms.path, in_params) return None libstoragemgmt-1.2.3/test/0000775000175000017500000000000012542455463012531 500000000000000libstoragemgmt-1.2.3/test/runtests.sh0000775000175000017500000001134012537737032014675 00000000000000#!/bin/bash # Copyright (C) 2011-2014 Red Hat, Inc. 
# This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: tasleson # # Unit test case driver # Make sure these are available in the envirnoment before we start lsmd export G_SLICE=always-malloc export G_DEBUG=gc-friendly export CK_DEFAULT_TIMEOUT=600 export CK_FORK=no rundir=$RANDOM base=/tmp/$rundir LSMD_PID=65535 export LSM_TEST_RUNDIR=$rundir export LSM_UDS_PATH=$base/lsm/ipc/ LSMD_TMP_LOG_FILE="$base/lsmd.log" cleanup() { #Clean up the daemon if it is running if [ $LSMD_PID -ne 65535 ] then kill -s KILL $LSMD_PID fi cat $LSMD_TMP_LOG_FILE if [ -e $LSM_UDS_PATH ] then rm -rf $base fi if [ -e $rootdir/_build ] then rm $lsm_py_folder/lsm/plugin rm $lsm_py_folder/lsm/lsmcli chmod -w $lsm_py_folder/lsm fi } good() { echo "executing: $1" eval $1 ec=$? if [ $ec -ne 0 ]; then echo "Fail exit[$ec]: $1" cleanup exit 1 fi } # Add a signal handler to clean-up trap "cleanup; exit 1" INT # Unset these as they can cause the test case to fail # specifically the password one, but remove both. unset LSMCLI_PASSWORD unset LSMCLI_URI #Put us in a consistent spot cd "$(dirname "$0")" #Get base root directory testdir=`pwd` rootdir=${testdir%/*} #Are we running within distcheck? 
c_unit=$rootdir/test/tester LSMD_DAEMON=$rootdir/daemon/lsmd shared_libs=$rootdir/c_binding/.libs/ bin_plugin=$rootdir/plugin/simc/.libs/ lsm_py_folder=$rootdir/python_binding lsm_plugin_py_folder=$rootdir/plugin lsmcli_py_folder=$rootdir/tools/lsmcli if [ -e $rootdir/_build ] then c_unit=$rootdir/_build/test/tester LSMD_DAEMON=$rootdir/_build/daemon/lsmd shared_libs=$rootdir/_build/c_binding/.libs/ bin_plugin=$rootdir/_build/plugin/simc/.libs/ # In distcheck, all folder is read only(except _build and _inst). # which prevent us from linking plugin and lsmcli into python/lsm folder. chmod +w $rootdir/python_binding/lsm fi #With a distcheck you cannot muck with the source file system, so we will copy #plugins somewhere else. plugins=$base/plugins #Export needed vars export PYTHONPATH=$lsm_py_folder export LD_LIBRARY_PATH=$base/lib export LSM_SIM_DATA="$base/lsm_sim_data" echo "testdir= $testdir" echo "rootdir= $rootdir" echo "c_unit= $c_unit" #Create the directory for the unix domain sockets good "mkdir -p $LSM_UDS_PATH" good "mkdir -p $plugins" good "mkdir -p $LD_LIBRARY_PATH" #Copy shared libraries good "cp $shared_libs/*.so.* $LD_LIBRARY_PATH" #Link plugin folder as python/lsm/plugin folder if [ ! -L "$lsm_py_folder/lsm/plugin" ];then good "ln -s $lsm_plugin_py_folder $lsm_py_folder/lsm/" fi #Link lsmcli folder as python/lsm/lsmcli folder if [ ! -L "$lsm_py_folder/lsm/lsmcli" ];then good "ln -s $lsmcli_py_folder $lsm_py_folder/lsm/" fi #Copy plugins to one directory. good "find $rootdir/ \( ! -regex '.*/\..*' \) -type f -name \*_lsmplugin -exec cp {} $plugins \;" #Copy the actual binary, not the shell script pointing to binary otherwise #valgrind does not work. 
good "cp $bin_plugin/*_lsmplugin $plugins" good "ls -lh $plugins" #Check to make sure that constants are correct good "perl ../tools/utility/check_const.pl" #Start daemon $LSMD_DAEMON \ --plugindir $plugins \ --socketdir $LSM_UDS_PATH \ -d >$LSMD_TMP_LOG_FILE & # Let the daemon get settled before running the tests sleep 2 LSMD_PID=$(ps aux | grep $LSM_UDS_PATH | grep -v grep | awk '{print $2}') #Run C unit test if [ -z "$LSM_VALGRIND" ]; then good "$c_unit" else good "valgrind --leak-check=full --show-reachable=no --log-file=/tmp/leaking_client $rootdir/test/.libs/tester" fi #Run cmdline against the simulator if we are not checking for leaks if [ -z "$LSM_VALGRIND" ]; then export LSMCLI_URI='sim://' good "$rootdir/test/cmdtest.py -c $plugins/sim_lsmplugin" good "$rootdir/test/cmdtest.py -c $rootdir/tools/lsmcli/lsmcli" #Run the plug-in test against the python simulator good "$rootdir/test/plugin_test.py -v --uri sim://" fi #Run the plug-in test against the C simulator" good "$rootdir/test/plugin_test.py -v --uri simc://" #Pretend we were never here cleanup libstoragemgmt-1.2.3/test/plugin_test.py0000775000175000017500000014724512537737032015377 00000000000000#!/usr/bin/env python2 # Copyright (C) 2013-2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . 
import lsm import functools import time import random import string import traceback import unittest import argparse import collections import atexit import sys import re import os import tempfile from lsm import LsmError, ErrorNumber from lsm import Capabilities as Cap results = {} stats = {} MIN_POOL_SIZE = 4096 MIN_OBJECT_SIZE = 512 def mb_in_bytes(mib): return 1024 * 1024 * mib def record_result(method): def recorder(*args, **kwargs): try: result = method(*args, **kwargs) results[method.__name__] = dict(rc=True, msg=None) return result except Exception as e: results[method.__name__] = dict(rc=False, stack_trace=traceback.format_exc(), msg=str(e)) return recorder def update_stats(method_name, duration, number_results): if method_name in stats: stats[method_name]["count"] += 1 else: stats[method_name] = dict(count=1, total_time=0, number_items=0) stats[method_name]["total_time"] += duration if number_results > 0: stats[method_name]["number_items"] += number_results def rs(component, l=4): """ Generate a random string """ rp = ''.join(random.choice(string.ascii_uppercase) for x in range(l)) if component is not None: return 'lsm_%s_%s' % (component, rp) return rp def r_fcpn(): """ Generate a random 16 character hex number """ rnd_fcpn = '%016x' % random.randrange(2 ** 64) return ':'.join(rnd_fcpn[i:i + 2] for i in range(0, len(rnd_fcpn), 2)) class Duration(object): def __init__(self): self.start = 0 self.end = 0 def __enter__(self): self.start = time.time() return self def __exit__(self, *ignore): self.end = time.time() def amount(self): return self.end - self.start def supported(cap, capability): for c in capability: if not cap.supported(c): return False return True class TestProxy(object): # Errors that we are forcing to occur not_logging = [lsm.ErrorNumber.NO_SUPPORT, lsm.ErrorNumber.NAME_CONFLICT, lsm.ErrorNumber.NO_STATE_CHANGE, lsm.ErrorNumber.IS_MASKED, lsm.ErrorNumber.EXISTS_INITIATOR] # Hash of all calls that can be async async_calls = {'volume_create': 
(unicode, lsm.Volume), 'volume_resize': (unicode, lsm.Volume), 'volume_replicate': (unicode, lsm.Volume), 'volume_replicate_range': (unicode,), 'volume_delete': (unicode,), 'volume_child_dependency_rm': (unicode,), 'fs_delete': (unicode,), 'fs_resize': (unicode, lsm.FileSystem), 'fs_create': (unicode, lsm.FileSystem), 'fs_clone': (unicode, lsm.FileSystem), 'fs_file_clone': (unicode,), 'fs_snapshot_create': (unicode, lsm.FsSnapshot), 'fs_snapshot_delete': (unicode,), 'fs_snapshot_restore': (unicode,), 'fs_child_dependency_rm': (unicode,)} ## The constructor. # @param self The object self # @param obj The object instance to wrap def __init__(self, obj=None): """ Constructor which takes an object to wrap. """ self.o = obj ## Called each time an attribute is requested of the object # @param self The object self # @param name Name of the attribute being accessed # @return The result of the method def __getattr__(self, name): """ Called each time an attribute is requested of the object """ if hasattr(self.o, name): return functools.partial(self.present, name) else: raise AttributeError("No such method %s" % name) @staticmethod def log_result(method, v): if method not in results: results[method] = [] results[method].append(v) ## Method which is called to invoke the actual method of interest. # # The intentions of this method is this: # - Invoke the method just like it normally would without this # so signature in & out is identical # - Collect results of the method call # - Collect stats on the execution time of call # # @param self The object self # @param _proxy_method_name Method to invoke # @param args Arguments # @param kwargs Keyword arguments # @return The result of the method invocation def present(self, _proxy_method_name, *args, **kwargs): """ Method which is called to invoke the actual method of interest. 
""" rc = None job_possible = _proxy_method_name in TestProxy.async_calls # Timer block with Duration() as method_time: try: rc = getattr(self.o, _proxy_method_name)(*args, **kwargs) TestProxy.log_result(_proxy_method_name, dict(rc=True, stack_trace=None, msg=None)) except lsm.LsmError as le: # We are forcing some types of error, for these we won't log # but will allow the test case asserts to check to make sure # we actually got them. if le.code not in self.not_logging: TestProxy.log_result( _proxy_method_name, dict(rc=False, stack_trace=traceback.format_exc(), msg=str(le))) raise # If the job can do async, we will block looping on it. if job_possible and rc is not None: # Note: Some return a single unicode or None, # others return a tuple (job, object) if type(rc) != tuple and type(rc) != list: rc = (rc, None) rc = self.wait_for_it(_proxy_method_name, *rc) # Fix up return value to match what it would normally be if job_possible: if 2 == len(TestProxy.async_calls[_proxy_method_name]): rc = (None, rc) # We don't care about time per operation when there is only one # possible. 
if not job_possible and isinstance(rc, collections.Sequence) \ and len(rc) > 2: num_results = len(rc) else: num_results = 0 update_stats(_proxy_method_name, method_time.amount(), num_results) return rc def wait_for_it(self, msg, job, item): if not job: return item else: while True: (s, percent, i) = self.job_status(job) if s == lsm.JobStatus.INPROGRESS: time.sleep(0.25) elif s == lsm.JobStatus.COMPLETE: self.job_free(job) return i else: raise Exception(msg + " job error code= " + str(s)) def check_type(value, *expected): assert type(value) in expected, "type expected (%s), type actual (%s)" % \ (str(type(value)), str(expected)) class TestPlugin(unittest.TestCase): """ Anything that starts with test_ will be run as a separate unit test with the setUp and tearDown methods called before and after respectively """ URI = 'sim://' PASSWORD = None def _object_size(self, pool): return mb_in_bytes(MIN_OBJECT_SIZE) def setUp(self): for skip_test_case in TestPlugin.SKIP_TEST_CASES: if self.id().endswith(skip_test_case): self.skipTest("Tested has been skiped as requested") self.c = TestProxy(lsm.Client(TestPlugin.URI, TestPlugin.PASSWORD)) self.systems = self.c.systems() self.pools = self.c.pools() self.pool_by_sys_id = {} for s in self.systems: self.pool_by_sys_id[s.id] = [p for p in self.pools if p.system_id == s.id] # TODO Store what exists, so that we don't remove it def _get_pool_by_usage(self, system_id, element_type, unsupported_features=0): largest_free = 0 rc = None for p in self.pool_by_sys_id[system_id]: # If the pool matches our criteria and min size we will consider # it, but we will select the one with the most free space for # testing and one that support volume expansion if p.element_type & element_type and \ p.free_space > mb_in_bytes(MIN_POOL_SIZE) and \ (not p.unsupported_actions & unsupported_features): if p.free_space > largest_free: largest_free = p.free_space rc = p return rc def tearDown(self): # TODO Walk the array looking for stuff we have created and 
remove it # What should we do if an array supports a create operation, but not # the corresponding remove? self.c.close() def test_plugin_info(self): (desc, version) = self.c.plugin_info() self.assertTrue(desc is not None and len(desc) > 0) self.assertTrue(version is not None and len(version) > 0) def test_timeout(self): tmo = 40000 self.c.time_out_set(tmo) self.assertEquals(self.c.time_out_get(), tmo) def test_systems_list(self): arrays = self.c.systems() self.assertTrue(len(arrays) > 0, "We need at least one array for " "testing!") def test_pools_list(self): pools_list = self.c.pools() self.assertTrue(len(pools_list) > 0, "We need at least 1 pool to test") def _find_or_create_volumes(self): """ Find existing volumes, if not found, try to create one. Return (volumes, flag_created) If 'flag_created' is True, then returned volumes is newly created. """ volumes = self.c.volumes() flag_created = False if len(self.c.volumes()) == 0: for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.VOLUME_CREATE, Cap.VOLUME_DELETE]): self._volume_create(s.id) flag_created = True break volumes = self.c.volumes() return volumes, flag_created def test_volume_list(self): (volumes, flag_created) = self._find_or_create_volumes() self.assertTrue(len(volumes) > 0, "We need at least 1 volume to test") if flag_created: self._volume_delete(volumes[0]) def test_volume_vpd83(self): (volumes, flag_created) = self._find_or_create_volumes() self.assertTrue(len(volumes) > 0, "We need at least 1 volume to test") for v in volumes: self.assertTrue(lsm.Volume.vpd83_verify(v.vpd83), "VPD is not as expected '%s' for volume id: '%s'" % (v.vpd83, v.id)) if flag_created: self._volume_delete(volumes[0]) def test_disks_list(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.DISKS]): disks = self.c.disks() self.assertTrue(len(disks) > 0, "We need at least 1 disk to test") def _volume_create(self, system_id, element_type=lsm.Pool.ELEMENT_TYPE_VOLUME, 
unsupported_features=0): if system_id in self.pool_by_sys_id: p = self._get_pool_by_usage(system_id, element_type, unsupported_features) self.assertTrue(p is not None, "Unable to find a suitable pool") if p: vol_size = self._object_size(p) vol = self.c.volume_create(p, rs('v'), vol_size, lsm.Volume.PROVISION_DEFAULT)[1] self.assertTrue(self._volume_exists(vol.id), p.id) self.assertTrue(vol.pool_id == p.id) return vol, p def _fs_create(self, system_id): if system_id in self.pool_by_sys_id: fs = None pool = self._get_pool_by_usage(system_id, lsm.Pool.ELEMENT_TYPE_FS) self.assertTrue(pool is not None, "Unable to find a suitable pool " "for fs creation") if pool is not None: fs_size = self._object_size(pool) fs = self.c.fs_create(pool, rs('fs'), fs_size)[1] self.assertTrue(self._fs_exists(fs.id)) self.assertTrue(fs is not None) self.assertTrue(pool is not None) return fs, pool def _volume_delete(self, volume): self.c.volume_delete(volume) self.assertFalse(self._volume_exists(volume.id)) def _fs_delete(self, fs): self.c.fs_delete(fs) self.assertFalse(self._fs_exists(fs.id)) def _fs_snapshot_delete(self, fs, ss): self.c.fs_snapshot_delete(fs, ss) self.assertFalse(self._fs_snapshot_exists(fs, ss.id)) def _volume_exists(self, volume_id, pool_id=None): volumes = self.c.volumes() for v in volumes: if v.id == volume_id: if pool_id is not None: if v.pool_id == pool_id: return True else: return False return True return False def _fs_exists(self, fs_id): fs = self.c.fs() for f in fs: if f.id == fs_id: return True return False def _fs_snapshot_exists(self, fs, ss_id): snapshots = self.c.fs_snapshots(fs) for s in snapshots: if s.id == ss_id: return True return False def test_volume_create_delete(self): if self.pool_by_sys_id: for s in self.systems: vol = None cap = self.c.capabilities(s) if supported(cap, [Cap.VOLUME_CREATE]): vol = self._volume_create(s.id)[0] self.assertTrue(vol is not None) if vol is not None and \ supported(cap, [Cap.VOLUME_DELETE]): self._volume_delete(vol) 
def test_volume_resize(self): if self.pool_by_sys_id: for s in self.systems: cap = self.c.capabilities(s) # We need to make sure that the pool supports volume grow. unsupported = lsm.Pool.UNSUPPORTED_VOLUME_GROW if supported(cap, [Cap.VOLUME_CREATE, Cap.VOLUME_DELETE, Cap.VOLUME_RESIZE]): vol = self._volume_create( s.id, unsupported_features=unsupported)[0] vol_resize = self.c.volume_resize( vol, vol.size_bytes + mb_in_bytes(16))[1] self.assertTrue(vol.size_bytes < vol_resize.size_bytes) self.assertTrue(vol.id == vol_resize.id, "Expecting re-sized volume to refer to " "same volume. Expected %s, got %s" % (vol.id, vol_resize.id)) if vol.id == vol_resize.id: self._volume_delete(vol_resize) else: # Delete the original self._volume_delete(vol) def _replicate_test(self, capability, replication_type): if self.pool_by_sys_id: for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.VOLUME_CREATE, Cap.VOLUME_DELETE]): vol, pool = self._volume_create(s.id) # For the moment lets allow the array to pick the pool # to supply the backing store for the replicate if supported(cap, [capability]): volume_clone = self.c.volume_replicate( None, replication_type, vol, rs('v_c_'))[1] self.assertTrue(volume_clone is not None) self.assertTrue(self._volume_exists(volume_clone.id)) if volume_clone is not None: # Lets test for creating a clone with an # existing name error_num = None try: volume_clone_dupe_name = \ self.c.volume_replicate( None, replication_type, vol, volume_clone.name)[1] except LsmError as le: error_num = le.code self.assertTrue(error_num == ErrorNumber.NAME_CONFLICT) self._volume_delete(volume_clone) self._volume_delete(vol) def test_volume_replication(self): self._replicate_test(Cap.VOLUME_REPLICATE_CLONE, lsm.Volume.REPLICATE_CLONE) self._replicate_test(Cap.VOLUME_REPLICATE_COPY, lsm.Volume.REPLICATE_COPY) self._replicate_test(Cap.VOLUME_REPLICATE_MIRROR_ASYNC, lsm.Volume.REPLICATE_MIRROR_ASYNC) self._replicate_test(Cap.VOLUME_REPLICATE_MIRROR_SYNC, 
lsm.Volume.REPLICATE_MIRROR_SYNC) def test_volume_replicate_range_block_size(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.VOLUME_COPY_RANGE_BLOCK_SIZE]): size = self.c.volume_replicate_range_block_size(s) self.assertTrue(size > 0) else: self.assertRaises(lsm.LsmError, self.c.volume_replicate_range_block_size, s) def test_replication_range(self): if self.pool_by_sys_id: for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.VOLUME_COPY_RANGE_BLOCK_SIZE, Cap.VOLUME_CREATE, Cap.VOLUME_DELETE, Cap.VOLUME_COPY_RANGE]): size = self.c.volume_replicate_range_block_size(s) vol, pool = self._volume_create(s.id) br = lsm.BlockRange(0, size, size) if supported( cap, [Cap.VOLUME_COPY_RANGE_CLONE]): self.c.volume_replicate_range( lsm.Volume.REPLICATE_CLONE, vol, vol, [br]) else: self.assertRaises( lsm.LsmError, self.c.volume_replicate_range, lsm.Volume.REPLICATE_CLONE, vol, vol, [br]) br = lsm.BlockRange(size * 2, size, size) if supported( cap, [Cap.VOLUME_COPY_RANGE_COPY]): self.c.volume_replicate_range( lsm.Volume.REPLICATE_COPY, vol, vol, [br]) else: self.assertRaises( lsm.LsmError, self.c.volume_replicate_range, lsm.Volume.REPLICATE_COPY, vol, vol, [br]) self._volume_delete(vol) def test_fs_creation_deletion(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.FS_CREATE]): fs, pool = self._fs_create(s.id) if fs is not None: if supported(cap, [Cap.FS_DELETE]): self._fs_delete(fs) def test_fs_resize(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.FS_CREATE]): fs, pool = self._fs_create(s.id) if fs is not None: if supported(cap, [Cap.FS_RESIZE]): fs_size = fs.total_space + mb_in_bytes(16) fs_resized = self.c.fs_resize(fs, fs_size)[1] self.assertTrue(fs_resized.total_space) if supported(cap, [Cap.FS_DELETE]): self._fs_delete(fs) def test_fs_clone(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.FS_CREATE, Cap.FS_CLONE]): fs, 
pool = self._fs_create(s.id) if fs is not None: fs_clone = self.c.fs_clone(fs, rs('fs_c'))[1] if supported(cap, [Cap.FS_DELETE]): self._fs_delete(fs_clone) self._fs_delete(fs) def test_fs_snapshot(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.FS_CREATE, Cap.FS_SNAPSHOT_CREATE]): fs, pool = self._fs_create(s.id) if fs is not None: ss = self.c.fs_snapshot_create(fs, rs('ss'))[1] self.assertTrue(self._fs_snapshot_exists(fs, ss.id)) if supported(cap, [Cap.FS_SNAPSHOT_RESTORE]): self.c.fs_snapshot_restore(fs, ss, None, None, True) # Delete snapshot if supported(cap, [Cap.FS_SNAPSHOT_DELETE]): self._fs_snapshot_delete(fs, ss) def test_target_ports(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.TARGET_PORTS]): ports = self.c.target_ports() for p in ports: self.assertTrue(p.id is not None) self.assertTrue(p.port_type is not None) self.assertTrue(p.service_address is not None) self.assertTrue(p.network_address is not None) self.assertTrue(p.physical_address is not None) self.assertTrue(p.physical_name is not None) self.assertTrue(p.system_id is not None) def _masking_state(self, cap, ag, vol, masked): if supported(cap, [Cap. VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP]): vol_masked = \ self.c.volumes_accessible_by_access_group(ag) match = [x for x in vol_masked if x.id == vol.id] if masked: self.assertTrue(len(match) == 1) else: self.assertTrue(len(match) == 0) if supported(cap, [Cap. 
ACCESS_GROUPS_GRANTED_TO_VOLUME]): ag_masked = \ self.c.access_groups_granted_to_volume(vol) match = [x for x in ag_masked if x.id == ag.id] if masked: self.assertTrue(len(match) == 1) else: self.assertTrue(len(match) == 0) def test_mask_unmask(self): for s in self.systems: ag_created = None cap = self.c.capabilities(s) if supported(cap, [Cap.ACCESS_GROUPS, Cap.VOLUME_MASK, Cap.VOLUME_UNMASK, Cap.VOLUME_CREATE, Cap.VOLUME_DELETE]): if supported(cap, [Cap.ACCESS_GROUP_CREATE_ISCSI_IQN]): ag_name = rs("ag") ag_iqn = 'iqn.1994-05.com.domain:01.' + rs(None, 6) ag_created = self.c.access_group_create( ag_name, ag_iqn, lsm.AccessGroup.INIT_TYPE_ISCSI_IQN, s) # Make sure we have an access group to test with, many # smi-s providers don't provide functionality to create them! ag_list = self.c.access_groups('system_id', s.id) if len(ag_list): vol = self._volume_create(s.id)[0] self.assertTrue(vol is not None) chose_ag = ag_created if chose_ag is None: for ag in ag_list: if len(ag.init_ids) >= 1: chose_ag = ag break if chose_ag is None: raise Exception("No access group with 1+ member " "found, cannot do volume mask test") if vol is not None and chose_ag is not None: self.c.volume_mask(chose_ag, vol) self._masking_state(cap, chose_ag, vol, True) # Test duplicate call for NO_STATE_CHANGE error flag_dup_error_found = False try: self.c.volume_mask(chose_ag, vol) except LsmError as lsm_error: self.assertTrue( lsm_error.code == ErrorNumber.NO_STATE_CHANGE) flag_dup_error_found = True self.assertTrue(flag_dup_error_found) self.c.volume_unmask(chose_ag, vol) self._masking_state(cap, chose_ag, vol, False) # Test duplicate call for NO_STATE_CHANGE error flag_dup_error_found = False try: self.c.volume_unmask(chose_ag, vol) except LsmError as lsm_error: self.assertTrue( lsm_error.code == ErrorNumber.NO_STATE_CHANGE) flag_dup_error_found = True self.assertTrue(flag_dup_error_found) if vol: self._volume_delete(vol) if ag_created: self.c.access_group_delete(ag_created) ag_created = None def 
_create_access_group(self, cap, name, s, init_type): ag_created = None if init_type == lsm.AccessGroup.INIT_TYPE_ISCSI_IQN: ag_created = self.c.access_group_create( name, 'iqn.1994-05.com.domain:01.' + rs(None, 6), lsm.AccessGroup.INIT_TYPE_ISCSI_IQN, s) elif init_type == lsm.AccessGroup.INIT_TYPE_WWPN: ag_created = self.c.access_group_create( name, r_fcpn(), lsm.AccessGroup.INIT_TYPE_WWPN, s) self.assertTrue(ag_created is not None) if ag_created is not None: ag_list = self.c.access_groups() match = [x for x in ag_list if x.id == ag_created.id] self.assertTrue(len(match) == 1, "Newly created access group %s " "not in the access group listing" % (ag_created.name)) return ag_created def _delete_access_group(self, ag): self.c.access_group_delete(ag) ag_list = self.c.access_groups() match = [x for x in ag_list if x.id == ag.id] self.assertTrue(len(match) == 0, "Expected access group that was " "deleted to not show up in the " "access group list!") def _test_ag_create_dup(self, lsm_ag, lsm_system): """ Test NAME_CONFLICT and EXISTS_INITIATOR of access_group_create(). """ flag_got_expected_error = False new_init_id = None if lsm_ag.init_type == lsm.AccessGroup.INIT_TYPE_ISCSI_IQN: new_init_id = 'iqn.1994-05.com.domain:01.' 
+ rs(None, 6) else: new_init_id = r_fcpn() try: self.c.access_group_create( lsm_ag.name, new_init_id, lsm_ag.init_type, lsm_system) except LsmError as lsm_error: self.assertTrue(lsm_error.code == ErrorNumber.NAME_CONFLICT) flag_got_expected_error = True self.assertTrue(flag_got_expected_error) flag_got_expected_error = False try: self.c.access_group_create( rs('ag'), lsm_ag.init_ids[0], lsm_ag.init_type, lsm_system) except LsmError as lsm_error: self.assertTrue(lsm_error.code == ErrorNumber.EXISTS_INITIATOR) flag_got_expected_error = True self.assertTrue(flag_got_expected_error) def _test_ag_create_delete(self, cap, s): ag = None if supported(cap, [Cap.ACCESS_GROUPS, Cap.ACCESS_GROUP_CREATE_ISCSI_IQN]): ag = self._create_access_group( cap, rs('ag'), s, lsm.AccessGroup.INIT_TYPE_ISCSI_IQN) if ag is not None and \ supported(cap, [Cap.ACCESS_GROUP_DELETE]): self._test_ag_create_dup(ag, s) self._delete_access_group(ag) if supported(cap, [Cap.ACCESS_GROUPS, Cap.ACCESS_GROUP_CREATE_WWPN]): ag = self._create_access_group( cap, rs('ag'), s, lsm.AccessGroup.INIT_TYPE_WWPN) if ag is not None and \ supported(cap, [Cap.ACCESS_GROUP_DELETE]): self._test_ag_create_dup(ag, s) self._delete_access_group(ag) def test_iscsi_chap(self): ag = None for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.ACCESS_GROUPS, Cap.ACCESS_GROUP_CREATE_ISCSI_IQN, Cap.VOLUME_ISCSI_CHAP_AUTHENTICATION]): ag = self._create_access_group( cap, rs('ag'), s, lsm.AccessGroup.INIT_TYPE_ISCSI_IQN) self.c.iscsi_chap_auth(ag.init_ids[0], 'foo', rs(None, 12), None, None) if ag is not None and \ supported(cap, [Cap.ACCESS_GROUP_DELETE]): self._test_ag_create_dup(ag, s) self._delete_access_group(ag) def test_access_group_create_delete(self): for s in self.systems: cap = self.c.capabilities(s) self._test_ag_create_delete(cap, s) def test_access_group_list(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.ACCESS_GROUPS]): ag_list = 
self.c.access_groups('system_id', s.id) if len(ag_list) == 0: self._test_ag_create_delete(cap, s) else: self.assertTrue(len(ag_list) > 0, "Need at least 1 access group for testing " "and no support exists for creation of " "access groups for this system") def _ag_init_add(self, ag): t = None t_id = '' if ag.init_type == lsm.AccessGroup.INIT_TYPE_ISCSI_IQN: t_id = 'iqn.1994-05.com.domain:01.89bd02' t = lsm.AccessGroup.INIT_TYPE_ISCSI_IQN else: # We will try FC PN t_id = r_fcpn() t = lsm.AccessGroup.INIT_TYPE_WWPN self.c.access_group_initiator_add(ag, t_id, t) ag_after = self.c.access_groups('id', ag.id)[0] match = [x for x in ag_after.init_ids if x == t_id] self.assertTrue(len(match) == 1) return t_id def _ag_init_delete(self, ag, init_id, init_type): self.c.access_group_initiator_delete(ag, init_id, init_type) ag_after = self.c.access_groups('id', ag.id)[0] match = [x for x in ag_after.init_ids if x == init_id] self.assertTrue(len(match) == 0) def test_access_group_initiator_add_delete(self): usable_ag_types = [lsm.AccessGroup.INIT_TYPE_WWPN, lsm.AccessGroup.INIT_TYPE_ISCSI_IQN] for s in self.systems: ag_to_delete = None cap = self.c.capabilities(s) if supported(cap, [Cap.ACCESS_GROUPS]): ag_list = self.c.access_groups('system_id', s.id) if supported(cap, [Cap.ACCESS_GROUP_CREATE_WWPN])\ or supported(cap, [Cap.ACCESS_GROUP_CREATE_ISCSI_IQN]): if supported( cap, [Cap.ACCESS_GROUP_CREATE_ISCSI_IQN, Cap.ACCESS_GROUP_DELETE]): ag_to_delete = self._create_access_group( cap, rs('ag'), s, lsm.AccessGroup.INIT_TYPE_ISCSI_IQN) ag_list = self.c.access_groups('system_id', s.id) if supported(cap, [Cap. 
ACCESS_GROUP_INITIATOR_ADD_ISCSI_IQN]): init_id = self._ag_init_add(ag_to_delete) if supported( cap, [Cap.ACCESS_GROUP_INITIATOR_DELETE]): self._ag_init_delete( ag_to_delete, init_id, lsm.AccessGroup.INIT_TYPE_ISCSI_IQN) if supported( cap, [Cap.ACCESS_GROUP_CREATE_WWPN, Cap.ACCESS_GROUP_DELETE]): ag_to_delete = self._create_access_group( cap, rs('ag'), s, lsm.AccessGroup.INIT_TYPE_WWPN) ag_list = self.c.access_groups('system_id', s.id) if ag_to_delete is not None: self._delete_access_group(ag_to_delete) else: if len(ag_list): # Try and find an initiator group that has a usable # access group type instead of unknown or other... ag = ag_list[0] for a_tmp in ag_list: if a_tmp.init_type in usable_ag_types: ag = a_tmp break if supported(cap, [Cap. ACCESS_GROUP_INITIATOR_ADD_WWPN]): init_id = self._ag_init_add(ag) if supported( cap, [Cap.ACCESS_GROUP_INITIATOR_DELETE]): self._ag_init_delete( ag, init_id, lsm.AccessGroup.INIT_TYPE_WWPN) if supported( cap, [Cap.ACCESS_GROUP_INITIATOR_ADD_ISCSI_IQN]): init_id = self._ag_init_add(ag) if supported(cap, [Cap.ACCESS_GROUP_INITIATOR_DELETE]): self._ag_init_delete( ag, init_id, lsm.AccessGroup.INIT_TYPE_ISCSI_IQN) def test_duplicate_volume_name(self): if self.pool_by_sys_id: for s in self.systems: vol = None cap = self.c.capabilities(s) if supported(cap, [Cap.VOLUME_CREATE]): vol, pool = self._volume_create(s.id) self.assertTrue(vol is not None) # Try to create another with same name try: vol_dupe = self.c.volume_create( pool, vol.name, vol.size_bytes, lsm.Volume.PROVISION_DEFAULT)[1] except LsmError as le: self.assertTrue(le.code == ErrorNumber.NAME_CONFLICT) if vol is not None and \ supported(cap, [Cap.VOLUME_DELETE]): self._volume_delete(vol) def test_duplicate_access_group_name(self): for s in self.systems: ag_to_delete = None ag_type = None ag_name = rs('ag_dupe') cap = self.c.capabilities(s) if supported(cap, [Cap.ACCESS_GROUPS, Cap.ACCESS_GROUP_DELETE]): ag_list = self.c.access_groups('system_id', s.id) if supported( cap, 
[Cap.ACCESS_GROUP_CREATE_ISCSI_IQN]): ag_type = lsm.AccessGroup.INIT_TYPE_ISCSI_IQN elif supported(cap, [Cap.ACCESS_GROUP_CREATE_WWPN]): ag_type = lsm.AccessGroup.INIT_TYPE_WWPN else: return ag_created = self._create_access_group( cap, ag_name, s, ag_type) if ag_created is not None: # Try to create a duplicate got_exception = False try: ag_dupe = self._create_access_group( cap, ag_name, s, ag_type) except LsmError as le: got_exception = True self.assertTrue(le.code == ErrorNumber.NAME_CONFLICT) self.assertTrue(got_exception) self._delete_access_group(ag_created) def test_ag_vol_delete_with_vol_masked(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.ACCESS_GROUPS, Cap.ACCESS_GROUP_CREATE_ISCSI_IQN, Cap.ACCESS_GROUP_DELETE, Cap.VOLUME_UNMASK, Cap.VOLUME_CREATE, Cap.VOLUME_DELETE, Cap.VOLUME_MASK, Cap.VOLUME_UNMASK]): ag_name = rs("ag") ag_iqn = 'iqn.1994-05.com.domain:01.' + rs(None, 6) ag = self.c.access_group_create( ag_name, ag_iqn, lsm.AccessGroup.INIT_TYPE_ISCSI_IQN, s) pool = self._get_pool_by_usage(s.id, lsm.Pool.ELEMENT_TYPE_VOLUME) if ag and pool: vol_size = self._object_size(pool) vol = self.c.volume_create( pool, rs('v'), vol_size, lsm.Volume.PROVISION_DEFAULT)[1] if vol: got_exception = False self.c.volume_mask(ag, vol) # Try to delete the access group try: self.c.access_group_delete(ag) except LsmError as le: if le.code == lsm.ErrorNumber.IS_MASKED: got_exception = True self.assertTrue(le.code == lsm.ErrorNumber.IS_MASKED) self.assertTrue(got_exception) # Try to delete the volume got_exception = False try: self.c.volume_delete(vol) except LsmError as le: if le.code == lsm.ErrorNumber.IS_MASKED: got_exception = True self.assertTrue(le.code == lsm.ErrorNumber.IS_MASKED) self.assertTrue(got_exception) # Clean up self.c.volume_unmask(ag, vol) self.c.volume_delete(vol) self.c.access_group_delete(ag) def test_volume_vpd83_verify(self): failing = [None, "012345678901234567890123456789AB", "012345678901234567890123456789ax", 
"012345678901234567890123456789ag", "1234567890123456789012345abcdef", "01234567890123456789012345abcdefa", "55cd2e404beec32e0", "55cd2e404beec32ex", "35cd2e404beec32A"] for f in failing: self.assertFalse(lsm.Volume.vpd83_verify(f)) self.assertTrue( lsm.Volume.vpd83_verify("61234567890123456789012345abcdef")) self.assertTrue( lsm.Volume.vpd83_verify("55cd2e404beec32e")) self.assertTrue( lsm.Volume.vpd83_verify("35cd2e404beec32e")) self.assertTrue( lsm.Volume.vpd83_verify("25cd2e404beec32e")) def test_available_plugins(self): plugins = self.c.available_plugins(':') self.assertTrue(plugins is not None) self.assertTrue(len(plugins) > 0) self.assertTrue(':' in plugins[0]) def test_volume_enable_disable(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.VOLUME_CREATE, Cap.VOLUME_DELETE, Cap.VOLUME_ENABLE, Cap.VOLUME_DISABLE]): vol, pool = self._volume_create(s.id) self.c.volume_disable(vol) self.c.volume_enable(vol) self._volume_delete(vol) def test_daemon_not_running(self): current = None got_exception = False # Force a ErrorNumber.DAEMON_NOT_RUNNING if 'LSM_UDS_PATH' in os.environ: current = os.environ['LSM_UDS_PATH'] tmp_dir = tempfile.mkdtemp() os.environ['LSM_UDS_PATH'] = tmp_dir try: tmp_c = lsm.Client(TestPlugin.URI, TestPlugin.PASSWORD) except LsmError as expected_error: got_exception = True self.assertTrue(expected_error.code == ErrorNumber.DAEMON_NOT_RUNNING, 'Actual error %d' % (expected_error.code)) self.assertTrue(got_exception) os.rmdir(tmp_dir) if current: os.environ['LSM_UDS_PATH'] = current else: del os.environ['LSM_UDS_PATH'] def test_non_existent_plugin(self): got_exception = False try: uri = "%s://user@host" % rs(None, 6) tmp_c = lsm.Client(uri, TestPlugin.PASSWORD) except LsmError as expected_error: got_exception = True self.assertTrue(expected_error.code == ErrorNumber.PLUGIN_NOT_EXIST, 'Actual error %d' % (expected_error.code)) self.assertTrue(got_exception) def test_volume_depends(self): for s in self.systems: cap = 
self.c.capabilities(s) if supported(cap, [Cap.VOLUME_CREATE, Cap.VOLUME_DELETE, Cap.VOLUME_CHILD_DEPENDENCY, Cap.VOLUME_CHILD_DEPENDENCY_RM]) and \ (supported(cap, [Cap.VOLUME_REPLICATE_COPY]) or supported(cap, [Cap.VOLUME_REPLICATE_CLONE])): vol, pol = self._volume_create(s.id) if supported(cap, [Cap.VOLUME_REPLICATE_CLONE]): vol_child = self.c.volume_replicate( None, lsm.Volume.REPLICATE_CLONE, vol, rs('v_tc_'))[1] else: vol_child = self.c.volume_replicate( None, lsm.Volume.REPLICATE_COPY, vol, rs('v_fc_'))[1] self.assertTrue(vol_child is not None) if self.c.volume_child_dependency(vol): self.c.volume_child_dependency_rm(vol) else: self.assertTrue(self.c.volume_child_dependency_rm(vol) is None) self._volume_delete(vol) if vol_child: self._volume_delete(vol_child) def test_fs_depends(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.FS_CREATE, Cap.FS_DELETE, Cap.FS_CHILD_DEPENDENCY, Cap.FS_CHILD_DEPENDENCY_RM, Cap.FS_CLONE]): fs, pol = self._fs_create(s.id) fs_child = self.c.fs_clone(fs, rs('fs_c_'))[1] self.assertTrue(fs_child is not None) if self.c.fs_child_dependency(fs, None): self.c.fs_child_dependency_rm(fs, None) else: self.assertTrue(self.c.fs_child_dependency_rm(fs, None) is None) self._fs_delete(fs) if fs_child: self._fs_delete(fs_child) def test_nfs_auth_types(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.EXPORT_AUTH]): auth_types = self.c.export_auth() self.assertTrue(auth_types is not None) def test_export_list(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.EXPORTS]): exports = self.c.exports() # TODO verify export values def test_create_delete_exports(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.FS_CREATE, Cap.EXPORTS, Cap.EXPORT_FS, Cap.EXPORT_REMOVE]): fs, pool = self._fs_create(s.id) if supported(cap, [Cap.EXPORT_CUSTOM_PATH]): path = "/mnt/%s" % rs(None, 6) exp = self.c.export_fs(fs.id, path, [], [], 
['192.168.2.1']) else: exp = self.c.export_fs(fs.id, None, [], [], ['192.168.2.1']) self.c.export_remove(exp) self._fs_delete(fs) def test_pool_member_info(self): for s in self.systems: cap = self.c.capabilities(s) if supported(cap, [Cap.POOL_MEMBER_INFO]): for pool in self.c.pools(): (raid_type, member_type, member_ids) = \ self.c.pool_member_info(pool) self.assertTrue(type(raid_type) is int) self.assertTrue(type(member_type) is int) self.assertTrue(type(member_ids) is list) def _skip_current_test(self, messsage): """ If skipTest is supported, skip this test with provided message. Sliently return if not supported. """ if hasattr(unittest.TestCase, 'skipTest') is True: self.skipTest(messsage) return def test_volume_raid_create(self): for s in self.systems: cap = self.c.capabilities(s) # TODO(Gris Ge): Add test code for other RAID type and strip size if supported(cap, [Cap.VOLUME_RAID_CREATE]): supported_raid_types, supported_strip_sizes = \ self.c.volume_raid_create_cap_get(s) if lsm.Volume.RAID_TYPE_RAID1 not in supported_raid_types: self._skip_current_test( "Skip test: current system does not support " "creating RAID1 volume") # Find two free disks free_disks = [] for disk in self.c.disks(): if len(free_disks) == 2: break if disk.status & lsm.Disk.STATUS_FREE: free_disks.append(disk) if len(free_disks) != 2: self._skip_current_test( "Skip test: Failed to find two free disks for RAID 1") return new_vol = self.c.volume_raid_create( rs('v'), lsm.Volume.RAID_TYPE_RAID1, free_disks, lsm.Volume.VCR_STRIP_SIZE_DEFAULT) self.assertTrue(new_vol is not None) # TODO(Gris Ge): Use volume_raid_info() and pool_member_info() # to verify size, raid_type, member type, member ids. if supported(cap, [Cap.VOLUME_DELETE]): self._volume_delete(new_vol) else: self._skip_current_test( "Skip test: not support of VOLUME_RAID_CREATE") def dump_results(): """ unittest.main exits when done so we need to register this handler to get our results out. 
If PyYAML is available we will output detailed results, else we will output nothing. The detailed output results of what we called, how it finished and how long it took. """ try: import yaml sys.stdout.write(yaml.dump(dict(methods_called=results, stats=stats))) except ImportError: sys.stdout.write("NOTICE: Install PyYAML for detailed test results\n") def add_our_params(): """ There are probably easier ways to extend unittest, but this seems easiest at the moment if we want to retain the default behavior and introduce a couple of parameters. """ unittest.TestProgram.USAGE += """\ Options libStorageMgmt: --password 'Array password' --uri 'Array URI' --skip 'Test case to skip. Repeatable argument' """ if __name__ == "__main__": atexit.register(dump_results) add_our_params() parser = argparse.ArgumentParser(add_help=False) parser.add_argument('--password', default=None) parser.add_argument('--uri') parser.add_argument('--skip', action='append') options, other_args = parser.parse_known_args() if options.uri: TestPlugin.URI = options.uri elif os.getenv('LSM_TEST_URI'): TestPlugin.URI = os.getenv('LSM_TEST_URI') else: TestPlugin.URI = 'sim://' if options.password: TestPlugin.PASSWORD = options.password elif os.getenv('LSM_TEST_PASSWORD'): TestPlugin.PASSWORD = os.getenv('LSM_TEST_PASSWORD') if options.skip: if hasattr(unittest.TestCase, 'skipTest') is False: raise Exception( "Current python version is too old to support 'skipTest'") TestPlugin.SKIP_TEST_CASES = options.skip else: TestPlugin.SKIP_TEST_CASES = [] unittest.main(argv=sys.argv[:1] + other_args) libstoragemgmt-1.2.3/test/Makefile.am0000664000175000017500000000050312537546123014501 00000000000000AM_CPPFLAGS = \ -I$(top_srcdir)/c_binding/include \ -I@srcdir@/c_binding/include \ $(LIBXML_CFLAGS) EXTRA_DIST=cmdtest.py runtests.sh plugin_test.py TESTS = runtests.sh check_PROGRAMS = tester tester_CFLAGS = $(LIBCHECK_CFLAGS) tester_LDADD = ../c_binding/libstoragemgmt.la $(LIBCHECK_LIBS) tester_SOURCES = tester.c 
libstoragemgmt-1.2.3/test/tester.c0000664000175000017500000030674012537737032014134 00000000000000/* * Copyright (C) 2011-2014 Red Hat, Inc. * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; If not, see . * * Author: tasleson */ #include #include #include #include #include #include #include const char URI[] = "sim://localhost/?statefile=/tmp/%d/lsm_sim_%s"; const char SYSTEM_NAME[] = "LSM simulated storage plug-in"; const char SYSTEM_ID[] = "sim-01"; const char *ISCSI_HOST[2] = { "iqn.1994-05.com.domain:01.89bd01", "iqn.1994-05.com.domain:01.89bd02" }; static int which_plugin = 0; #define POLL_SLEEP 50000 lsm_connect *c = NULL; char *error(lsm_error_ptr e) { static char eb[1024]; memset(eb, 0, sizeof(eb)); if( e != NULL ) { snprintf(eb, sizeof(eb), "Error msg= %s - exception %s - debug %s", lsm_error_message_get(e), lsm_error_exception_get(e), lsm_error_debug_get(e)); lsm_error_free(e); e = NULL; } else { snprintf(eb, sizeof(eb), "No addl. error info."); } return eb; } /** * Macro for calls which we expect success. * @param variable Where the result of the call is placed * @param func Name of function * @param ... Function parameters */ #define G(variable, func, ...) \ variable = func(__VA_ARGS__); \ fail_unless( LSM_ERR_OK == variable, "call:%s rc = %d %s (which %d)", #func, \ variable, error(lsm_error_last_get(c)), which_plugin); /** * Macro for calls which we expect failure. 
* @param variable Where the result of the call is placed * @param func Name of function * @param ... Function parameters */ #define F(variable, func, ...) \ variable = func(__VA_ARGS__); \ fail_unless( LSM_ERR_OK != variable, "call:%s rc = %d %s (which %d)", #func, \ variable, error(lsm_error_last_get(c)), which_plugin); /** * Generates a random string in the buffer with specified length. * Note: This function should not produce repeating sequences or duplicates * regardless if it's used repeatedly in the same function in the same process * or different forked processes. * @param buff Buffer to write the random string to * @param len Length of the random string */ void generate_random(char *buff, uint32_t len) { uint32_t i = 0; static int seed = 0; static int pid = 0; /* Re-seed the random number generator at least once per unique process */ if( (!seed || !pid) || (pid != getpid()) ) { seed = time(NULL); pid = getpid(); srandom(seed + pid); } if( buff && (len > 1) ) { for(i = 0; i < (len - 1); ++i) { buff[i] = 97 + rand()%26; } buff[len-1] = '\0'; } } char *plugin_to_use() { char *uri_to_use = "sim://"; if( which_plugin == 1 ) { uri_to_use = "simc://"; } else { char *rundir = getenv("LSM_TEST_RUNDIR"); /* The python plug-in keeps state, but the unit tests don't expect this * create a new random file for the state to keep things new. 
*/ if( rundir ) { int rdir = atoi(rundir); static char fn[128]; static char name[32]; generate_random(name, sizeof(name)); snprintf(fn, sizeof(fn), URI, rdir, name); uri_to_use = fn; } else { printf("Missing LSM_TEST_RUNDIR, expect test failures!\n"); } } printf("URI = %s\n", uri_to_use); return uri_to_use; } lsm_pool *get_test_pool(lsm_connect *c) { lsm_pool **pools = NULL; uint32_t count = 0; lsm_pool *test_pool = NULL; int rc = 0; G(rc, lsm_pool_list, c, NULL, NULL, &pools, &count, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_OK == rc ) { uint32_t i = 0; for(i = 0; i < count; ++i ) { if(strcmp(lsm_pool_name_get(pools[i]), "lsm_test_aggr") == 0 ) { test_pool = lsm_pool_record_copy(pools[i]); G(rc, lsm_pool_record_array_free, pools, count); break; } } } return test_pool; } void dump_error(lsm_error_ptr e) { int rc = 0; if (e != NULL) { printf("Error msg= %s - exception %s - debug %s\n", lsm_error_message_get(e), lsm_error_exception_get(e), lsm_error_debug_get(e)); G(rc, lsm_error_free, e); e = NULL; } else { printf("No additional error information!\n"); } } void setup(void) { /* * Note: Do not use any error reporting functions in this function */ lsm_error_ptr e = NULL; int rc = lsm_connect_password(plugin_to_use(), NULL, &c, 30000, &e, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_OK == rc ) { if( getenv("LSM_DEBUG_PLUGIN") ) { printf("Attach debugger to plug-in, press when ready..."); getchar(); } } } void teardown(void) { /* * Note: Do not use any error reporting functions in this function */ if( c ) { lsm_connect_close(c, LSM_CLIENT_FLAG_RSVD); c = NULL; } } void wait_for_job(lsm_connect *c, char **job_id) { lsm_job_status status; uint8_t pc = 0; int rc = 0; do { G(rc, lsm_job_status_get, c, *job_id, &status, &pc, LSM_CLIENT_FLAG_RSVD); printf("GENERIC: Job %s in progress, %d done, status = %d\n", *job_id, pc, status); usleep(POLL_SLEEP); } while( status == LSM_JOB_INPROGRESS ); G(rc, lsm_job_free, c, job_id, LSM_CLIENT_FLAG_RSVD); fail_unless( LSM_JOB_COMPLETE == status); 
fail_unless( 100 == pc); fail_unless( job_id != NULL ); } lsm_volume *wait_for_job_vol(lsm_connect *c, char **job_id) { lsm_job_status status; lsm_volume *vol = NULL; uint8_t pc = 0; int rc = 0; do { G(rc, lsm_job_status_volume_get, c, *job_id, &status, &pc, &vol, LSM_CLIENT_FLAG_RSVD); printf("VOLUME: Job %s in progress, %d done, status = %d\n", *job_id, pc, status); usleep(POLL_SLEEP); } while( rc == LSM_ERR_OK && status == LSM_JOB_INPROGRESS ); printf("Volume complete: Job %s percent %d done, status = %d, rc=%d\n", *job_id, pc, status, rc); G(rc, lsm_job_free, c, job_id, LSM_CLIENT_FLAG_RSVD); fail_unless( LSM_JOB_COMPLETE == status); fail_unless( 100 == pc); return vol; } lsm_pool *wait_for_job_pool(lsm_connect *c, char **job_id) { lsm_job_status status; lsm_pool *pool = NULL; uint8_t pc = 0; int rc = 0; do { G(rc, lsm_job_status_pool_get, c, *job_id, &status, &pc, &pool, LSM_CLIENT_FLAG_RSVD); printf("POOL: Job %s in progress, %d done, status = %d\n", *job_id, pc, status); usleep(POLL_SLEEP); } while( status == LSM_JOB_INPROGRESS ); G(rc, lsm_job_free, c, job_id, LSM_CLIENT_FLAG_RSVD); fail_unless( LSM_JOB_COMPLETE == status); fail_unless( 100 == pc); return pool; } lsm_fs *wait_for_job_fs(lsm_connect *c, char **job_id) { lsm_job_status status; lsm_fs *fs = NULL; uint8_t pc = 0; int rc = 0; do { G(rc, lsm_job_status_fs_get, c, *job_id, &status, &pc, &fs, LSM_CLIENT_FLAG_RSVD); printf("FS: Job %s in progress, %d done, status = %d\n", *job_id, pc, status); usleep(POLL_SLEEP); } while( status == LSM_JOB_INPROGRESS ); G(rc, lsm_job_free, c, job_id, LSM_CLIENT_FLAG_RSVD); fail_unless( LSM_JOB_COMPLETE == status); fail_unless( 100 == pc); return fs; } lsm_fs_ss *wait_for_job_ss(lsm_connect *c, char **job_id) { lsm_job_status status; lsm_fs_ss *ss = NULL; uint8_t pc = 0; int rc = 0; do { G(rc, lsm_job_status_ss_get, c, *job_id, &status, &pc, &ss, LSM_CLIENT_FLAG_RSVD); printf("SS: Job %s in progress, %d done, status = %d\n", *job_id, pc, status); usleep(POLL_SLEEP); 
} while( status == LSM_JOB_INPROGRESS ); G(rc, lsm_job_free, c, job_id, LSM_CLIENT_FLAG_RSVD); fail_unless( LSM_JOB_COMPLETE == status); fail_unless( 100 == pc); return ss; } int compare_string_lists(lsm_string_list *l, lsm_string_list *r) { if( l && r) { int i = 0; if( l == r ) { return 0; } if( lsm_string_list_size(l) != lsm_string_list_size(r) ) { return 1; } for( i = 0; i < lsm_string_list_size(l); ++i ) { if( strcmp(lsm_string_list_elem_get(l, i), lsm_string_list_elem_get(r, i)) != 0) { return 1; } } return 0; } return 1; } void create_volumes(lsm_connect *c, lsm_pool *p, int num) { int i; for( i = 0; i < num; ++i ) { lsm_volume *n = NULL; char *job = NULL; char name[32]; memset(name, 0, sizeof(name)); snprintf(name, sizeof(name), "test %d", i); int vc = lsm_volume_create(c, p, name, 20000000, LSM_VOLUME_PROVISION_DEFAULT, &n, &job, LSM_CLIENT_FLAG_RSVD); fail_unless( vc == LSM_ERR_OK || vc == LSM_ERR_JOB_STARTED, "lsmVolumeCreate %d (%s)", vc, error(lsm_error_last_get(c))); if( LSM_ERR_JOB_STARTED == vc ) { n = wait_for_job_vol(c, &job); } else { fail_unless(LSM_ERR_OK == vc); } G(vc, lsm_volume_record_free, n); n = NULL; } } lsm_system *get_system(lsm_connect *c) { lsm_system *rc_sys = NULL; lsm_system **sys=NULL; uint32_t count = 0; int rc = 0; G(rc, lsm_system_list, c, &sys, &count, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_OK == rc && count) { rc_sys = lsm_system_record_copy(sys[0]); G(rc, lsm_system_record_array_free, sys, count); } return rc_sys; } START_TEST(test_smoke_test) { uint32_t i = 0; int rc = 0; lsm_pool *selectedPool = NULL; uint32_t poolCount = 0; uint32_t set_tmo = 31123; uint32_t tmo = 0; //Set timeout. G(rc, lsm_connect_timeout_set, c, set_tmo, LSM_CLIENT_FLAG_RSVD); //Get time-out. 
G(rc, lsm_connect_timeout_get, c, &tmo, LSM_CLIENT_FLAG_RSVD); fail_unless( set_tmo == tmo, " %u != %u", set_tmo, tmo ); lsm_pool **pools = NULL; uint32_t count = 0; int poolToUse = -1; //Get pool list G(rc, lsm_pool_list, c, NULL, NULL, &pools, &poolCount, LSM_CLIENT_FLAG_RSVD); //Check pool count count = poolCount; fail_unless(count == 4, "We are expecting 4 pools from simulator"); //Dump pools and select a pool to use for testing. for (i = 0; i < count; ++i) { printf("Id= %s, name=%s, capacity= %"PRIu64 ", remaining= %"PRIu64" " "system %s\n", lsm_pool_id_get(pools[i]), lsm_pool_name_get(pools[i]), lsm_pool_total_space_get(pools[i]), lsm_pool_free_space_get(pools[i]), lsm_pool_system_id_get(pools[i])); fail_unless( strcmp(lsm_pool_system_id_get(pools[i]), SYSTEM_ID) == 0, "Expecting system id of %s, got %s", SYSTEM_ID, lsm_pool_system_id_get(pools[i])); fail_unless(lsm_pool_status_get(pools[i]) == LSM_POOL_STATUS_OK, "%"PRIu64, lsm_pool_status_get(pools[i])); if (lsm_pool_free_space_get(pools[i]) > 20000000) { poolToUse = i; } } if (poolToUse != -1) { lsm_volume *n = NULL; char *job = NULL; selectedPool = pools[poolToUse]; int vc = lsm_volume_create(c, pools[poolToUse], "test", 20000000, LSM_VOLUME_PROVISION_DEFAULT, &n, &job, LSM_CLIENT_FLAG_RSVD); fail_unless( vc == LSM_ERR_OK || vc == LSM_ERR_JOB_STARTED, "lsmVolumeCreate %d (%s)", vc, error(lsm_error_last_get(c))); if( LSM_ERR_JOB_STARTED == vc ) { n = wait_for_job_vol(c, &job); fail_unless( n != NULL); } uint8_t dependants = 10; int child_depends = 0; G(child_depends, lsm_volume_child_dependency, c, n, &dependants, LSM_CLIENT_FLAG_RSVD); fail_unless(dependants == 0); child_depends = lsm_volume_child_dependency_delete(c, n, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == child_depends ) { wait_for_job(c, &job); } else if ( LSM_ERR_NO_STATE_CHANGE != child_depends) { fail_unless(LSM_ERR_OK == child_depends, "rc = %d", child_depends); fail_unless(NULL == job); } lsm_block_range **range = 
lsm_block_range_record_array_alloc(3); fail_unless(NULL != range); uint32_t bs = 0; lsm_system * system = get_system(c); int rep_bs = 0; G(rep_bs, lsm_volume_replicate_range_block_size, c, system, &bs, LSM_CLIENT_FLAG_RSVD); fail_unless(512 == bs); lsm_system_record_free(system); int rep_i = 0; for(rep_i = 0; rep_i < 3; ++rep_i) { range[rep_i] = lsm_block_range_record_alloc((rep_i * 1000), ((rep_i + 100) * 10000), 10); lsm_block_range *copy = lsm_block_range_record_copy(range[rep_i]); fail_unless( lsm_block_range_source_start_get(range[rep_i]) == lsm_block_range_source_start_get(copy)); fail_unless( lsm_block_range_dest_start_get(range[rep_i]) == lsm_block_range_dest_start_get(copy)); fail_unless ( lsm_block_range_block_count_get(range[rep_i]) == lsm_block_range_block_count_get( copy )); G(rc, lsm_block_range_record_free, copy); copy = NULL; } int rep_range = lsm_volume_replicate_range(c, LSM_VOLUME_REPLICATE_CLONE, n, n, range, 3, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rep_range ) { wait_for_job(c, &job); } else { if( LSM_ERR_OK != rep_range ) { dump_error(lsm_error_last_get(c)); } fail_unless(LSM_ERR_OK == rep_range); } G(rc, lsm_block_range_record_array_free, range, 3); int online = 0; G(online, lsm_volume_disable, c, n, LSM_CLIENT_FLAG_RSVD); G(online, lsm_volume_enable, c, n, LSM_CLIENT_FLAG_RSVD); char *jobDel = NULL; int delRc = lsm_volume_delete(c, n, &jobDel, LSM_CLIENT_FLAG_RSVD); fail_unless( delRc == LSM_ERR_OK || delRc == LSM_ERR_JOB_STARTED, "lsm_volume_delete %d (%s)", rc, error(lsm_error_last_get(c))); if( LSM_ERR_JOB_STARTED == delRc ) { wait_for_job_vol(c, &jobDel); } G(rc, lsm_volume_record_free, n); } //Create some volumes for testing. 
create_volumes(c, selectedPool, 3); lsm_volume **volumes = NULL; count = 0; /* Get a list of volumes */ G(rc, lsm_volume_list, c, NULL, NULL, &volumes, &count, LSM_CLIENT_FLAG_RSVD); for (i = 0; i < count; ++i) { printf("%s - %s - %s - %"PRIu64" - %"PRIu64" - %x\n", lsm_volume_id_get(volumes[i]), lsm_volume_name_get(volumes[i]), lsm_volume_vpd83_get(volumes[i]), lsm_volume_block_size_get(volumes[i]), lsm_volume_number_of_blocks_get(volumes[i]), lsm_volume_admin_state_get(volumes[i])); } if( count ) { lsm_volume *rep = NULL; char *job = NULL; //Try a re-size then a snapshot lsm_volume *resized = NULL; char *resizeJob = NULL; int resizeRc = lsm_volume_resize(c, volumes[0], ((lsm_volume_number_of_blocks_get(volumes[0]) * lsm_volume_block_size_get(volumes[0])) * 2), &resized, &resizeJob, LSM_CLIENT_FLAG_RSVD); fail_unless(resizeRc == LSM_ERR_OK || resizeRc == LSM_ERR_JOB_STARTED, "lsmVolumeResize %d (%s)", resizeRc, error(lsm_error_last_get(c))); if( LSM_ERR_JOB_STARTED == resizeRc ) { resized = wait_for_job_vol(c, &resizeJob); } G(rc, lsm_volume_record_free, resized); //Lets create a clone of one. 
int repRc = lsm_volume_replicate(c, NULL, //Pool is optional LSM_VOLUME_REPLICATE_CLONE, volumes[0], "CLONE1", &rep, &job, LSM_CLIENT_FLAG_RSVD); fail_unless(repRc == LSM_ERR_OK || repRc == LSM_ERR_JOB_STARTED, "lsmVolumeReplicate %d (%s)", repRc, error(lsm_error_last_get(c))); if( LSM_ERR_JOB_STARTED == repRc ) { rep = wait_for_job_vol(c, &job); } G(rc, lsm_volume_record_free, rep); G(rc, lsm_volume_record_array_free, volumes, count); if (pools) { G(rc, lsm_pool_record_array_free, pools, poolCount); } } } END_TEST START_TEST(test_access_groups) { lsm_access_group **groups = NULL; lsm_access_group *group = NULL; uint32_t count = 0; uint32_t i = 0; lsm_string_list *init_list = NULL; lsm_system *system = NULL; int rc = 0; fail_unless(c!=NULL); system = get_system(c); G(rc, lsm_access_group_list, c, NULL, NULL, &groups, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(count == 0, "Expect 0 access groups, got %"PRIu32, count); fail_unless(groups == NULL); G(rc, lsm_access_group_create, c, "test_access_groups", "iqn.1994-05.com.domain:01.89bd01", LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN, system, &group, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_OK == rc ) { lsm_string_list *init_list = lsm_access_group_initiator_id_get(group); lsm_string_list *init_copy = NULL; fail_unless(lsm_string_list_size(init_list) == 1); init_copy = lsm_string_list_copy(init_list); lsm_access_group_initiator_id_set(group, init_copy); printf("%s - %s - %s\n", lsm_access_group_id_get(group), lsm_access_group_name_get(group), lsm_access_group_system_id_get(group)); fail_unless(NULL != lsm_access_group_id_get(group)); fail_unless(NULL != lsm_access_group_name_get(group)); fail_unless(NULL != lsm_access_group_system_id_get(group)); lsm_access_group *copy = lsm_access_group_record_copy(group); if( copy ) { fail_unless( strcmp(lsm_access_group_id_get(group), lsm_access_group_id_get(copy)) == 0); fail_unless( strcmp(lsm_access_group_name_get(group), lsm_access_group_name_get(copy)) == 0) ; fail_unless( 
strcmp(lsm_access_group_system_id_get(group), lsm_access_group_system_id_get(copy)) == 0); G(rc, lsm_access_group_record_free, copy); copy = NULL; } G(rc, lsm_string_list_free, init_copy); init_copy = NULL; } G(rc, lsm_access_group_list, c, NULL, NULL, &groups, &count, LSM_CLIENT_FLAG_RSVD); fail_unless( 1 == count ); G(rc, lsm_access_group_record_array_free, groups, count); groups = NULL; count = 0; //char *job = NULL; lsm_access_group *updated = NULL; rc = lsm_access_group_initiator_add(c, group, "iqn.1994-05.com.domain:01.89bd02", LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN, &updated, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_OK == rc, "Expected success on lsmAccessGroupInitiatorAdd %d %d", rc, which_plugin); G(rc, lsm_access_group_list, c, NULL, NULL, &groups, &count, LSM_CLIENT_FLAG_RSVD); fail_unless( 1 == count ); fail_unless( updated != NULL ); lsm_access_group_record_free(updated); updated = NULL; if( count ) { init_list = lsm_access_group_initiator_id_get(groups[0]); fail_unless( lsm_string_list_size(init_list) == 2, "Expecting 2 initiators, current num = %d\n", lsm_string_list_size(init_list) ); for( i = 0; i < lsm_string_list_size(init_list) - 1; ++i) { printf("%d = %s\n", i, lsm_string_list_elem_get(init_list, i)); printf("Deleting initiator %s from group!\n", lsm_string_list_elem_get(init_list, i)); G(rc, lsm_access_group_initiator_delete, c, groups[0], lsm_string_list_elem_get(init_list, i), LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN, &updated, LSM_CLIENT_FLAG_RSVD) fail_unless(updated != NULL); lsm_access_group_record_free(updated); updated = NULL; } init_list = NULL; } if( group ) { G(rc, lsm_access_group_record_free, group); group = NULL; } G(rc, lsm_access_group_record_array_free, groups, count); groups = NULL; count = 0; G(rc, lsm_access_group_list, c, NULL, NULL, &groups, &count, LSM_CLIENT_FLAG_RSVD); fail_unless( LSM_ERR_OK == rc); fail_unless( 1 == count ); if( count ) { init_list = lsm_access_group_initiator_id_get(groups[0]); fail_unless( init_list != 
NULL); fail_unless( lsm_string_list_size(init_list) == 1, "%d", lsm_string_list_size(init_list)); init_list = NULL; G(rc, lsm_access_group_record_array_free, groups, count); groups = NULL; count = 0; } G(rc, lsm_system_record_free, system); system = NULL; } END_TEST START_TEST(test_access_groups_grant_revoke) { fail_unless(c!=NULL); lsm_access_group *group = NULL; int rc = 0; lsm_pool *pool = get_test_pool(c); char *job = NULL; lsm_volume *n = NULL; lsm_system *system = NULL; fail_unless(pool != NULL); system = get_system(c); G(rc, lsm_access_group_create, c, "test_access_groups_grant_revoke", ISCSI_HOST[0], LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN, system, &group, LSM_CLIENT_FLAG_RSVD); int vc = lsm_volume_create(c, pool, "volume_grant_test", 20000000, LSM_VOLUME_PROVISION_DEFAULT, &n, &job, LSM_CLIENT_FLAG_RSVD); fail_unless( vc == LSM_ERR_OK || vc == LSM_ERR_JOB_STARTED, "lsmVolumeCreate %d (%s)", vc, error(lsm_error_last_get(c))); if( LSM_ERR_JOB_STARTED == vc ) { n = wait_for_job_vol(c, &job); } fail_unless(n != NULL); rc = lsm_volume_mask(c, group, n, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { wait_for_job(c, &job); } else { fail_unless(LSM_ERR_OK == rc, "rc = %d, plug-in = %d", rc, which_plugin); } lsm_volume **volumes = NULL; uint32_t v_count = 0; G(rc, lsm_volumes_accessible_by_access_group, c, group, &volumes, &v_count, LSM_CLIENT_FLAG_RSVD); fail_unless(v_count == 1); if( v_count >= 1 ) { fail_unless(strcmp(lsm_volume_id_get(volumes[0]), lsm_volume_id_get(n)) == 0); G(rc, lsm_volume_record_array_free, volumes, v_count); } lsm_access_group **groups; uint32_t g_count = 0; G(rc, lsm_access_groups_granted_to_volume, c, n, &groups, &g_count, LSM_CLIENT_FLAG_RSVD); fail_unless(g_count == 1); if( g_count >= 1 ) { fail_unless(strcmp(lsm_access_group_id_get(groups[0]), lsm_access_group_id_get(group)) == 0); G(rc, lsm_access_group_record_array_free, groups, g_count); } rc = lsm_volume_unmask(c, group, n, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == 
rc ) { wait_for_job(c, &job); } else { fail_unless(LSM_ERR_OK == rc, "rc = %d, which_plugin=%d", rc, which_plugin); } G(rc, lsm_access_group_delete, c, group, LSM_CLIENT_FLAG_RSVD); G(rc, lsm_access_group_record_free, group); G(rc, lsm_volume_record_free, n); G(rc, lsm_pool_record_free, pool); G(rc, lsm_system_record_free, system); } END_TEST START_TEST(test_fs) { fail_unless(c!=NULL); lsm_fs **fs_list = NULL; int rc = 0; uint32_t fs_count = 0; lsm_fs *nfs = NULL; lsm_fs *resized_fs = NULL; char *job = NULL; uint64_t fs_free_space = 0; lsm_pool *test_pool = get_test_pool(c); G(rc, lsm_fs_list, c, NULL, NULL, &fs_list, &fs_count, LSM_CLIENT_FLAG_RSVD); fail_unless(0 == fs_count); rc = lsm_fs_create(c, test_pool, "C_unit_test", 50000000, &nfs, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { fail_unless(NULL == nfs); nfs = wait_for_job_fs(c, &job); } else { fail_unless(LSM_ERR_OK == rc); } fail_unless(NULL != nfs); fs_free_space = lsm_fs_free_space_get(nfs); fail_unless(fs_free_space != 0); lsm_fs *cloned_fs = NULL; rc = lsm_fs_clone(c, nfs, "cloned_fs", NULL, &cloned_fs, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { fail_unless(NULL == cloned_fs); cloned_fs = wait_for_job_fs(c, &job); rc = lsm_fs_record_free(cloned_fs); cloned_fs = NULL; fail_unless(LSM_ERR_OK == rc, "rc= %d", rc); } else { fail_unless(LSM_ERR_OK == rc, "rc= %d", rc); } rc = lsm_fs_file_clone(c, nfs, "src/file.txt", "dest/file.txt", NULL, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { wait_for_job(c, &job); } else { fail_unless(LSM_ERR_OK == rc); } G(rc, lsm_fs_list, c, NULL, NULL, &fs_list, &fs_count, LSM_CLIENT_FLAG_RSVD); fail_unless(2 == fs_count, "fs_count = %d", fs_count); G(rc, lsm_fs_record_array_free, fs_list, fs_count); fs_list = NULL; fs_count = 0; rc = lsm_fs_resize(c,nfs, 100000000, &resized_fs, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { fail_unless(NULL == resized_fs); resized_fs = wait_for_job_fs(c, &job); } if ( 
which_plugin == 0 ){ uint8_t yes_no = 10; G(rc, lsm_fs_child_dependency, c, nfs, NULL, &yes_no, LSM_CLIENT_FLAG_RSVD); fail_unless( yes_no != 0); rc = lsm_fs_child_dependency_delete( c, nfs, NULL, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { fail_unless(NULL != job); wait_for_job(c, &job); } else { fail_unless( LSM_ERR_OK == rc); } } rc = lsm_fs_delete(c, resized_fs, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { wait_for_job(c, &job); } else { fail_unless(LSM_ERR_OK == rc); } G(rc, lsm_fs_record_free, resized_fs); G(rc, lsm_fs_record_free, nfs); G(rc, lsm_pool_record_free, test_pool); } END_TEST START_TEST(test_ss) { fail_unless(c != NULL); lsm_fs_ss **ss_list = NULL; uint32_t ss_count = 0; char *job = NULL; lsm_fs *fs = NULL; lsm_fs_ss *ss = NULL; lsm_pool *test_pool = get_test_pool(c); int rc = lsm_fs_create(c, test_pool, "test_fs", 100000000, &fs, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { fs = wait_for_job_fs(c, &job); } fail_unless(fs != NULL); G(rc, lsm_pool_record_free, test_pool); test_pool = NULL; G(rc, lsm_fs_ss_list, c, fs, &ss_list, &ss_count, LSM_CLIENT_FLAG_RSVD); fail_unless( NULL == ss_list); fail_unless( 0 == ss_count ); rc = lsm_fs_ss_create(c, fs, "test_snap", &ss, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { printf("Waiting for snap to create!\n"); ss = wait_for_job_ss(c, &job); } else { fail_unless(LSM_ERR_OK == rc); } fail_unless( NULL != ss); G(rc, lsm_fs_ss_list, c, fs, &ss_list, &ss_count, LSM_CLIENT_FLAG_RSVD); fail_unless( NULL != ss_list); fail_unless( 1 == ss_count ); lsm_string_list *files = lsm_string_list_alloc(1); if(files) { G(rc, lsm_string_list_elem_set, files, 0, "some/file/name.txt"); } rc = lsm_fs_ss_restore(c, fs, ss, files, files, 0, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { printf("Waiting for lsm_fs_ss_restore!\n"); wait_for_job(c, &job); } else { fail_unless(LSM_ERR_OK == rc); } G(rc, lsm_string_list_free, files); rc = 
lsm_fs_ss_delete(c, fs, ss, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { wait_for_job(c, &job); } G(rc, lsm_fs_ss_record_array_free, ss_list, ss_count); G(rc, lsm_fs_record_free, fs); G(rc, lsm_fs_ss_record_free, ss); } END_TEST START_TEST(test_systems) { uint32_t count = 0; lsm_system **sys=NULL; const char *id = NULL; const char *name = NULL; uint32_t status = 0; int rc = 0; fail_unless(c!=NULL); G(rc, lsm_system_list, c, &sys, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(count == 1); if( count ) { id = lsm_system_id_get(sys[0]); fail_unless(id != NULL); fail_unless(strcmp(id, SYSTEM_ID) == 0, "%s", id); name = lsm_system_name_get(sys[0]); fail_unless(name != NULL); fail_unless(strcmp(name, SYSTEM_NAME) == 0); status = lsm_system_status_get(sys[0]); fail_unless(status == LSM_SYSTEM_STATUS_OK, "status = %x", status); } G(rc, lsm_system_record_array_free, sys, count); } END_TEST #define COMPARE_STR_FUNC(func, l, r) \ rc = strcmp(func(l), func(r)); \ if( rc ) \ return rc;\ #define COMPARE_NUMBER_FUNC(func, l, r)\ if( func(l) != func(r) ) \ return 1;\ static int compare_disks(lsm_disk *l, lsm_disk *r) { int rc; if( l && r ) { COMPARE_STR_FUNC(lsm_disk_id_get, l, r); COMPARE_STR_FUNC(lsm_disk_name_get, l, r); COMPARE_STR_FUNC(lsm_disk_system_id_get, l, r); COMPARE_NUMBER_FUNC(lsm_disk_type_get, l, r); COMPARE_NUMBER_FUNC(lsm_disk_number_of_blocks_get, l, r); COMPARE_NUMBER_FUNC(lsm_disk_block_size_get, l, r); COMPARE_NUMBER_FUNC(lsm_disk_status_get, l, r); return 0; } return 1; } START_TEST(test_disks) { uint32_t count = 0; lsm_disk **d = NULL; const char *id; const char *name; const char *system_id; int i = 0; fail_unless(c!=NULL); int rc = lsm_disk_list(c, NULL, NULL, &d, &count, 0); if( LSM_ERR_OK == rc ) { fail_unless(LSM_ERR_OK == rc, "%d", rc); fail_unless(count >= 1); for( i = 0; i < count; ++i ) { lsm_disk *d_copy = lsm_disk_record_copy( d[i] ); fail_unless( d_copy != NULL ); if( d_copy ) { fail_unless(compare_disks(d[i], d_copy) == 0); 
lsm_disk_record_free(d_copy); d_copy = NULL; } id = lsm_disk_id_get(d[i]); fail_unless(id != NULL && strlen(id) > 0); name = lsm_disk_name_get(d[i]); fail_unless(id != NULL && strlen(name) > 0); system_id = lsm_disk_system_id_get(d[i]); fail_unless(id != NULL && strlen(system_id) > 0); fail_unless(strcmp(system_id, SYSTEM_ID) == 0, "%s", id); fail_unless( lsm_disk_type_get(d[i]) >= 1 ); fail_unless( lsm_disk_number_of_blocks_get(d[i]) >= 1); fail_unless( lsm_disk_block_size_get(d[i]) >= 1); fail_unless( lsm_disk_status_get(d[i]) >= 1); } lsm_disk_record_array_free(d, count); } else { fail_unless(d == NULL); fail_unless(count == 0); } } END_TEST START_TEST(test_nfs_exports) { fail_unless(c != NULL); int rc = 0; lsm_pool *test_pool = get_test_pool(c); lsm_fs *nfs = NULL; char *job = NULL; fail_unless(NULL != test_pool); if( test_pool ) { rc = lsm_fs_create(c, test_pool, "C_unit_test_nfs_export", 50000000, &nfs, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { nfs = wait_for_job_fs(c, &job); } else { fail_unless(LSM_ERR_OK == rc, "RC = %d", rc); } fail_unless(nfs != NULL); lsm_nfs_export **exports = NULL; uint32_t count = 0; G(rc, lsm_pool_record_free, test_pool); test_pool = NULL; if( nfs ) { G(rc, lsm_nfs_list, c, NULL, NULL, &exports, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(count == 0); fail_unless(NULL == exports); lsm_string_list *access = lsm_string_list_alloc(1); fail_unless(NULL != access); G(rc, lsm_string_list_elem_set, access, 0, "192.168.2.29"); lsm_nfs_export *e = NULL; G(rc, lsm_nfs_export_fs, c, lsm_fs_id_get(nfs), NULL, access, access, NULL, ANON_UID_GID_NA, ANON_UID_GID_NA, NULL, NULL, &e, LSM_CLIENT_FLAG_RSVD); G(rc, lsm_nfs_export_record_free, e); e=NULL; G(rc, lsm_string_list_free, access); access = NULL; G(rc, lsm_nfs_list, c, NULL, NULL, &exports, &count, LSM_CLIENT_FLAG_RSVD); fail_unless( exports != NULL); fail_unless( count == 1 ); if( count ) { G(rc, lsm_nfs_export_delete, c, exports[0], LSM_CLIENT_FLAG_RSVD); G(rc, 
lsm_nfs_export_record_array_free, exports, count); exports = NULL; G(rc, lsm_nfs_list, c, NULL, NULL, &exports, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(count == 0); fail_unless(NULL == exports); } G(rc, lsm_fs_record_free, nfs); nfs = NULL; } } } END_TEST struct bad_record { uint32_t m; }; START_TEST(test_volume_methods) { lsm_volume *v = NULL; lsm_pool *test_pool = NULL; char *job = NULL; int rc = 0; fail_unless(c != NULL); test_pool = get_test_pool(c); if( test_pool ) { rc = lsm_volume_create(c, test_pool, "lsm_volume_method_test", 10000000, LSM_VOLUME_PROVISION_DEFAULT, &v, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { v = wait_for_job_vol(c, &job); } else { fail_unless(LSM_ERR_OK == rc, "rc %d", rc); } if ( v ) { fail_unless( strcmp(lsm_volume_pool_id_get(v), lsm_pool_id_get(test_pool)) == 0 ); rc = lsm_volume_delete(c, v, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { wait_for_job(c, &job); } else { fail_unless(LSM_ERR_OK == rc, "rc %d", rc); } G(rc, lsm_volume_record_free, v); v = NULL; } G(rc, lsm_pool_record_free, test_pool); test_pool = NULL; } } END_TEST START_TEST(test_invalid_input) { fail_unless(c != NULL); int rc = 0; struct bad_record bad; bad.m = 0xA0A0A0A0; printf("Testing arguments\n"); lsm_pool *test_pool = get_test_pool(c); lsm_connect *test_connect = NULL; lsm_error_ptr test_error = NULL; rc = lsm_connect_password(NULL, NULL, NULL, 20000, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc %d", rc); rc = lsm_connect_password("INVALID_URI:\\yep", NULL, &test_connect, 20000, &test_error, LSM_CLIENT_FLAG_RSVD); fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc %d", rc); rc = lsm_connect_close((lsm_connect *)&bad, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_connect_close((lsm_connect *)NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_job_status_get(c, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD); 
fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); char *job = NULL; rc = lsm_job_status_get(c, job, NULL, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); lsm_job_status status; rc = lsm_job_status_get(c, job, &status, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); uint8_t percent_complete; rc = lsm_job_status_get(c, "NO_SUCH_JOB", &status, &percent_complete, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_NOT_FOUND_JOB == rc, "rc %d", rc); /* lsmJobStatusVolumeGet */ lsm_volume *vol = NULL; rc = lsm_job_status_volume_get(c, NULL, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_job_status_volume_get(c, NULL, NULL, NULL, &vol, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_job_status_volume_get(c, job, NULL, NULL, &vol, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_job_status_volume_get(c, job, &status, NULL, &vol, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_job_status_volume_get(c, "NO_SUCH_JOB", &status, &percent_complete, &vol, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_NOT_FOUND_JOB == rc, "rc %d", rc); /* lsmJobStatusFsGet */ lsm_fs *fs = NULL; rc = lsm_job_status_fs_get(c, NULL, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_job_status_fs_get(c, NULL, NULL, NULL, &fs, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_job_status_fs_get(c, job, NULL, NULL, &fs, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_job_status_fs_get(c, job, &status, NULL, &fs, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_job_status_fs_get(c, "NO_SUCH_JOB", &status, &percent_complete, &fs, LSM_CLIENT_FLAG_RSVD); 
fail_unless(LSM_ERR_NOT_FOUND_JOB == rc, "rc %d", rc); /* lsmJobStatusFsGet */ lsm_fs_ss *ss = (lsm_fs_ss *)&bad; rc = lsm_job_status_ss_get(c, NULL, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_job_status_ss_get(c, NULL, NULL, NULL, &ss, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); ss = NULL; rc = lsm_job_status_ss_get(c, job, NULL, NULL, &ss, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_job_status_ss_get(c, job, &status, NULL, &ss, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_job_status_ss_get(c, "NO_SUCH_JOB", &status, &percent_complete, &ss, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_NOT_FOUND_JOB == rc, "rc %d", rc); /* lsmJobFree */ char *bogus_job = strdup("NO_SUCH_JOB"); rc = lsm_job_free(c, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_job_free(c, &bogus_job, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_NOT_FOUND_JOB == rc, "rc %d", rc); fail_unless(bogus_job != NULL, "Expected bogus job to != NULL!"); free(bogus_job); /* lsm_disk_list */ uint32_t count = 0; lsm_disk **disks = NULL; rc = lsm_disk_list(c, NULL, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d, rc"); rc = lsm_disk_list(c, "bogus_key", NULL, &disks, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d, rc"); rc = lsm_disk_list(c, "bogus_key", "nope", &disks, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_UNSUPPORTED_SEARCH_KEY == rc, "rc %d, rc"); /* lsmPoolList */ rc = lsm_pool_list(c, NULL, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); lsm_pool **pools = NULL; rc = lsm_pool_list(c, NULL, NULL, &pools, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_pool_list(c, NULL, NULL, NULL, 
&count, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); pools = (lsm_pool **)&bad; rc = lsm_pool_list(c, NULL, NULL, &pools, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); pools = NULL; rc = lsm_pool_list(c, "bogus_key", "nope", &pools, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_UNSUPPORTED_SEARCH_KEY == rc, "rc %d", rc); rc = lsm_pool_list(c, "bogus_key", NULL, &pools, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); /* lsmVolumeList */ rc = lsm_volume_list(c, NULL, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); lsm_volume **vols = NULL; rc = lsm_volume_list(c, NULL, NULL, &vols, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_volume_list(c, NULL, NULL, NULL, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); vols = (lsm_volume **)&bad; rc = lsm_volume_list(c, NULL, NULL, &vols, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); vols = NULL; rc = lsm_volume_list(c, "bogus_key", "nope", &vols, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_UNSUPPORTED_SEARCH_KEY == rc, "rc %d", rc); rc = lsm_volume_list(c, "bogus_key", NULL, &vols, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); /* lsmVolumeCreate */ lsm_volume *new_vol = NULL; job = NULL; rc = lsm_volume_create(c, NULL, NULL, 0, 0, NULL, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_volume_create(c, (lsm_pool *)&bad, "BAD_POOL", 10000000, LSM_VOLUME_PROVISION_DEFAULT, &new_vol, &job, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_volume_create(c, test_pool, "", 10000000, LSM_VOLUME_PROVISION_DEFAULT, &new_vol, &job, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc 
%d", rc); rc = lsm_volume_create(c, test_pool, "ARG_TESTING", 10000000, LSM_VOLUME_PROVISION_DEFAULT, NULL, &job, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_volume_create(c, test_pool, "ARG_TESTING", 10000000, LSM_VOLUME_PROVISION_DEFAULT, &new_vol, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); job = "NOT_NULL"; rc = lsm_volume_create(c, test_pool, "ARG_TESTING", 10000000, LSM_VOLUME_PROVISION_DEFAULT, &new_vol, &job, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); job = NULL; rc = lsm_volume_create(c, test_pool, "ARG_TESTING", 10000000, LSM_VOLUME_PROVISION_DEFAULT, &new_vol, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { new_vol = wait_for_job_vol(c, &job); } else { fail_unless(LSM_ERR_OK == rc, "rc %d", rc); } /* lsmVolumeResize */ rc = lsm_volume_resize(c, NULL, 0, NULL, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); lsm_volume *resized = (lsm_volume *)&bad; rc = lsm_volume_resize(c, new_vol, 20000000, &resized, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); resized = NULL; rc = lsm_volume_resize(c, new_vol, 20000000, &resized, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_volume_resize(c, new_vol, lsm_volume_number_of_blocks_get(new_vol) * lsm_volume_block_size_get(new_vol), &resized, &job, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_NO_STATE_CHANGE == rc, "rc = %d", rc); rc = lsm_volume_resize(c, new_vol, 20000000, &resized, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { resized = wait_for_job_vol(c, &job); } else { fail_unless(LSM_ERR_OK == rc, "rc %d", rc); } /* lsmVolumeDelete */ rc = lsm_volume_delete(c, NULL, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_volume_delete(c, resized, NULL, LSM_CLIENT_FLAG_RSVD); 
fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc %d", rc); rc = lsm_volume_delete(c, resized, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { wait_for_job(c, &job); } else { fail_unless(LSM_ERR_OK == rc, "rc %d", rc); } /* lsmStorageCapabilities * */ lsm_system **sys = NULL; uint32_t num_systems = 0; rc = lsm_system_list(c, &sys, &num_systems, LSM_CLIENT_FLAG_RSVD ); fail_unless(LSM_ERR_OK == rc, "rc %d", rc); fail_unless( sys != NULL); fail_unless( num_systems >= 1, "num_systems %d", num_systems); rc = lsm_capabilities(c, NULL, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT, "rc %d", rc); if( num_systems ) { rc = lsm_capabilities(c, sys[0], NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(LSM_ERR_INVALID_ARGUMENT, "rc %d", rc); } /* lsmVolumeReplicate */ lsm_volume *cloned = NULL; rc = lsm_volume_replicate(c, (lsm_pool *)&bad, 0, NULL, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc); rc = lsm_volume_replicate(c, test_pool, LSM_VOLUME_REPLICATE_CLONE, NULL, "cloned", &cloned, &job, LSM_CLIENT_FLAG_RSVD); fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc); rc = lsm_volume_replicate(c, test_pool, LSM_VOLUME_REPLICATE_CLONE, new_vol, "", &cloned, &job, LSM_CLIENT_FLAG_RSVD); fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc); rc = lsm_volume_replicate(c, test_pool, LSM_VOLUME_REPLICATE_CLONE, new_vol, "cloned", NULL, &job, LSM_CLIENT_FLAG_RSVD); fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc); rc = lsm_volume_replicate(c, test_pool, LSM_VOLUME_REPLICATE_CLONE, new_vol, "cloned", &cloned, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc); /* lsmVolumeReplicateRangeBlockSize */ rc = lsm_volume_replicate_range_block_size(c, NULL, NULL, LSM_CLIENT_FLAG_RSVD); fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc); /* lsmVolumeReplicateRange */ rc = lsm_volume_replicate_range(c, LSM_VOLUME_REPLICATE_CLONE, NULL, NULL, NULL, 0, 
NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_volume_replicate_range(c, LSM_VOLUME_REPLICATE_CLONE, new_vol,
                                    NULL, NULL, 0, NULL,
                                    LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* Non-zero range count with a NULL range array must be rejected. */
    rc = lsm_volume_replicate_range(c, LSM_VOLUME_REPLICATE_CLONE, new_vol,
                                    new_vol, NULL, 1, &job,
                                    LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_volume_enable(c, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_volume_disable(c, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* lsmAccessGroupCreate */
    lsm_access_group *ag = NULL;
    lsm_system *system = NULL;
    system = get_system(c);

    rc = lsm_access_group_create(c, NULL, NULL, 0, system, NULL,
                                    LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_access_group_create(c, "my_group", ISCSI_HOST[0],
                                    LSM_ACCESS_GROUP_INIT_TYPE_OTHER, NULL,
                                    NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* Valid create; 'ag' is reused by the argument tests below. */
    rc = lsm_access_group_create(c, "my_group", ISCSI_HOST[0],
                                    LSM_ACCESS_GROUP_INIT_TYPE_OTHER, system,
                                    &ag, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_OK, "rc = %d", rc);
    fail_unless(ag != NULL);

    /* lsmAccessGroupDel */
    rc = lsm_access_group_delete(c, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* lsmAccessGroupInitiatorAdd */
    rc = lsm_access_group_initiator_add(c, NULL, NULL, 0, NULL,
                                        LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_access_group_initiator_delete(c, NULL, NULL, 0, NULL,
                                            LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_access_group_initiator_delete(c, ag, NULL,
                                            LSM_ACCESS_GROUP_INIT_TYPE_OTHER,
                                            NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_volume_mask(c, NULL, NULL,
    LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_volume_mask(c, ag, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_volume_unmask(c, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_volume_unmask(c, ag, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* lsmVolumesAccessibleByAccessGroup */
    rc = lsm_volumes_accessible_by_access_group(c, NULL, NULL, NULL,
                                                LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_volumes_accessible_by_access_group(c, ag, NULL, NULL,
                                                LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* lsmAccessGroupsGrantedToVolume */
    rc = lsm_access_groups_granted_to_volume(c, NULL, NULL, NULL,
                                                LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_access_groups_granted_to_volume(c, new_vol, NULL, NULL,
                                                LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* lsmVolumeChildDependency */
    rc = lsm_volume_child_dependency(c, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_volume_child_dependency(c, new_vol, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /*lsmVolumeChildDependencyDelete*/
    rc = lsm_volume_child_dependency_delete(c, NULL, NULL,
                                            LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_volume_child_dependency_delete(c, new_vol, NULL,
                                            LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* lsmSystemList */
    lsm_system **systems = NULL;
    rc = lsm_system_list(c, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_system_list(c, &systems, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d",
rc);

    /* lsmFsList */
    rc = lsm_fs_list(c, NULL, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    lsm_fs **fsl = NULL;
    rc = lsm_fs_list(c, NULL, NULL, &fsl, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_list(c, "bogus_key", "nope", &fsl, &count,
                        LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_UNSUPPORTED_SEARCH_KEY, "rc = %d", rc);

    /*lsmFsCreate*/
    rc = lsm_fs_create(c, NULL, NULL, 0, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_create(c, test_pool, NULL, 0, NULL, NULL,
                        LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* Valid create; 'arg_fs' is reused by the argument tests below. */
    lsm_fs *arg_fs = NULL;
    rc = lsm_fs_create(c, test_pool, "argument_fs", 10000000, &arg_fs, &job,
                        LSM_CLIENT_FLAG_RSVD);
    if( LSM_ERR_JOB_STARTED == rc ) {
        arg_fs = wait_for_job_fs(c, &job);
    } else {
        fail_unless(LSM_ERR_OK == rc, "rc = %d", rc);
    }

    /* lsmFsDelete */
    rc = lsm_fs_delete(c, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_delete(c, arg_fs, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* lsmFsResize */
    rc = lsm_fs_resize(c, NULL, 0, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_resize(c, arg_fs, 0, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* lsmFsClone */
    rc = lsm_fs_clone(c, NULL, NULL, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_clone(c, arg_fs, NULL, NULL, NULL, NULL,
                        LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /*lsmFsFileClone*/
    rc = lsm_fs_file_clone(c, NULL, NULL, NULL, NULL, NULL,
                            LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_file_clone(c, arg_fs, NULL, NULL, NULL, NULL,
    LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_child_dependency(c, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* A garbage string list must be rejected. */
    lsm_string_list *badf = (lsm_string_list *)&bad;
    rc = lsm_fs_child_dependency(c, arg_fs, badf, NULL,
                                    LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    lsm_string_list *f = lsm_string_list_alloc(1);
    rc = lsm_fs_child_dependency(c, arg_fs, f, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /*lsmFsChildDependencyDelete*/
    rc = lsm_fs_child_dependency_delete(c, NULL, NULL, NULL,
                                        LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_child_dependency_delete(c, arg_fs, badf, NULL,
                                        LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_child_dependency_delete(c, arg_fs, f, NULL,
                                        LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_ss_list(c, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_ss_list(c, arg_fs, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_ss_create(c, NULL, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_ss_create(c, arg_fs, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* Valid snapshot create; 'arg_ss' is reused below. */
    lsm_fs_ss *arg_ss = NULL;
    rc = lsm_fs_ss_create(c, arg_fs, "arg_snapshot", &arg_ss, &job,
                            LSM_CLIENT_FLAG_RSVD);
    if( LSM_ERR_JOB_STARTED == rc ) {
        arg_ss = wait_for_job_ss(c, &job);
    } else {
        fail_unless(rc == LSM_ERR_OK, "rc = %d", rc);
    }

    rc = lsm_fs_ss_delete(c, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_ss_delete(c, arg_fs, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc ==
    LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_ss_delete(c, arg_fs, arg_ss, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_ss_restore(c, NULL, NULL, NULL, NULL, 0, NULL,
                            LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_ss_restore(c, arg_fs, NULL, NULL, NULL, 0, NULL,
                            LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_ss_restore(c, arg_fs, arg_ss, badf, NULL, 0, NULL,
                            LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_ss_restore(c, arg_fs, arg_ss, badf, badf, 0, NULL,
                            LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_fs_ss_restore(c, arg_fs, arg_ss, f, f, 0, NULL,
                            LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_nfs_list(c, NULL, NULL, NULL, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* Free the records created above before the NFS argument tests. */
    rc = lsm_access_group_record_free(ag);
    ag = NULL;
    fail_unless(LSM_ERR_OK == rc, "%d", rc);

    rc = lsm_fs_ss_record_free(arg_ss);
    fail_unless(LSM_ERR_OK == rc, "%d", rc);
    arg_ss = NULL;

    rc = lsm_fs_record_free(arg_fs);
    fail_unless(LSM_ERR_OK == rc, "%d", rc);
    arg_fs = NULL;

    rc = lsm_nfs_export_fs(c, NULL, NULL, NULL, NULL, NULL, 0,0,NULL, NULL,
                            NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_nfs_export_fs(c, NULL, NULL, badf, NULL, NULL, 0,0,NULL, NULL,
                            NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_nfs_export_fs(c, NULL, NULL, f, badf, NULL, 0,0,NULL, NULL,
                            NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_nfs_export_fs(c, NULL, NULL, f, f, badf, 0,0,NULL, NULL, NULL,
                            LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_nfs_export_fs(c, NULL, NULL, f, f, f, 0,0, NULL, NULL, NULL,
    LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    rc = lsm_nfs_export_delete(c, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "rc = %d", rc);

    /* Final cleanup of every record created by this test. */
    rc = lsm_volume_record_free(new_vol);
    new_vol = NULL;
    fail_unless(rc == LSM_ERR_OK, "rc = %d", rc);

    rc = lsm_volume_record_free(resized);
    resized = NULL;
    fail_unless(rc == LSM_ERR_OK, "rc = %d", rc);

    rc = lsm_system_record_array_free(sys, num_systems);
    fail_unless(LSM_ERR_OK == rc, "%d", rc);

    rc = lsm_pool_record_free(test_pool);
    fail_unless(LSM_ERR_OK == rc, "%d", rc);

    G(rc, lsm_system_record_free, system );
    system = NULL;

    G(rc, lsm_string_list_free, f);
    f = NULL;
}
END_TEST

/* Assert that capability 't' is reported as supported in 'cap'. */
static void cap_test( lsm_storage_capabilities *cap, lsm_capability_type t)
{
    lsm_capability_value_type supported;
    supported = lsm_capability_get(cap, t);

    fail_unless ( lsm_capability_supported(cap, t) != 0,
                    "lsm_capability_supported returned unsupported");
    fail_unless( supported == LSM_CAP_SUPPORTED,
                    "supported = %d for %d", supported, t);
}

/* Query capabilities from the first system and check the expected set. */
START_TEST(test_capabilities)
{
    int rc = 0;

    lsm_system **sys = NULL;
    uint32_t sys_count = 0;
    lsm_storage_capabilities *cap = NULL;

    G(rc, lsm_system_list, c, &sys, &sys_count, LSM_CLIENT_FLAG_RSVD);
    fail_unless( sys_count >= 1, "count = %d", sys_count);

    if( sys_count > 0 ) {
        G(rc, lsm_capabilities, c, sys[0], &cap, LSM_CLIENT_FLAG_RSVD);

        if( LSM_ERR_OK == rc ) {
            cap_test(cap, LSM_CAP_VOLUMES);
            cap_test(cap, LSM_CAP_VOLUME_CREATE);
            cap_test(cap, LSM_CAP_VOLUME_RESIZE);
            cap_test(cap, LSM_CAP_VOLUME_REPLICATE);
            cap_test(cap, LSM_CAP_VOLUME_REPLICATE_CLONE);
            cap_test(cap, LSM_CAP_VOLUME_REPLICATE_COPY);
            cap_test(cap, LSM_CAP_VOLUME_REPLICATE_MIRROR_ASYNC);
            cap_test(cap, LSM_CAP_VOLUME_REPLICATE_MIRROR_SYNC);
            cap_test(cap, LSM_CAP_VOLUME_COPY_RANGE_BLOCK_SIZE);
            cap_test(cap, LSM_CAP_VOLUME_COPY_RANGE);
            cap_test(cap, LSM_CAP_VOLUME_COPY_RANGE_CLONE);
            cap_test(cap, LSM_CAP_VOLUME_COPY_RANGE_COPY);
            cap_test(cap, LSM_CAP_VOLUME_DELETE);
            cap_test(cap,
LSM_CAP_VOLUME_ENABLE);
            cap_test(cap, LSM_CAP_VOLUME_DISABLE);
            cap_test(cap, LSM_CAP_VOLUME_MASK);
            cap_test(cap, LSM_CAP_VOLUME_UNMASK);
            cap_test(cap, LSM_CAP_ACCESS_GROUPS);
            cap_test(cap, LSM_CAP_ACCESS_GROUP_CREATE_WWPN);
            cap_test(cap, LSM_CAP_ACCESS_GROUP_INITIATOR_ADD_WWPN);
            cap_test(cap, LSM_CAP_ACCESS_GROUP_INITIATOR_DELETE);
            cap_test(cap, LSM_CAP_VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP);
            cap_test(cap, LSM_CAP_ACCESS_GROUPS_GRANTED_TO_VOLUME);
            cap_test(cap, LSM_CAP_VOLUME_CHILD_DEPENDENCY);
            cap_test(cap, LSM_CAP_VOLUME_CHILD_DEPENDENCY_RM);
            cap_test(cap, LSM_CAP_FS);
            cap_test(cap, LSM_CAP_FS_DELETE);
            cap_test(cap, LSM_CAP_FS_RESIZE);
            cap_test(cap, LSM_CAP_FS_CREATE);
            cap_test(cap, LSM_CAP_FS_CLONE);
            cap_test(cap, LSM_CAP_FILE_CLONE);
            cap_test(cap, LSM_CAP_FS_SNAPSHOTS);
            cap_test(cap, LSM_CAP_FS_SNAPSHOT_CREATE);
            cap_test(cap, LSM_CAP_FS_SNAPSHOT_DELETE);
            cap_test(cap, LSM_CAP_FS_SNAPSHOT_RESTORE);
            cap_test(cap, LSM_CAP_FS_SNAPSHOT_RESTORE_SPECIFIC_FILES);
            cap_test(cap, LSM_CAP_FS_CHILD_DEPENDENCY);
            cap_test(cap, LSM_CAP_FS_CHILD_DEPENDENCY_RM);
            cap_test(cap, LSM_CAP_FS_CHILD_DEPENDENCY_RM_SPECIFIC_FILES );
            cap_test(cap, LSM_CAP_EXPORT_AUTH);
            cap_test(cap, LSM_CAP_EXPORTS);
            cap_test(cap, LSM_CAP_EXPORT_FS);
            cap_test(cap, LSM_CAP_EXPORT_REMOVE);

            G(rc, lsm_capability_record_free, cap);
            cap = NULL;
        }

        G(rc, lsm_system_record_array_free, sys, sys_count);
    }
}
END_TEST

/* Register iSCSI CHAP credentials against an initiator in a new group. */
START_TEST(test_iscsi_auth_in)
{
    lsm_access_group *group = NULL;
    lsm_system *system = NULL;
    int rc = 0;

    system = get_system(c);
    printf("get_system() OK\n");

    G(rc, lsm_access_group_create, c, "ISCSI_AUTH", ISCSI_HOST[0],
        LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN, system, &group,
        LSM_CLIENT_FLAG_RSVD);
    printf("lsm_access_group_create() OK\n");

    fail_unless(LSM_ERR_OK == rc, "rc = %d", rc);

    G(rc, lsm_system_record_free, system);
    printf("lsm_system_record_free() OK\n");
    system = NULL;

    if( LSM_ERR_OK == rc ) {
        rc = lsm_iscsi_chap_auth( c, ISCSI_HOST[0], "username", "secret",
                                    NULL, NULL, LSM_CLIENT_FLAG_RSVD);
        fail_unless(LSM_ERR_OK == rc, "rc = %d", rc);

        rc = lsm_access_group_delete(c, group, LSM_CLIENT_FLAG_RSVD);
        fail_unless(LSM_ERR_OK == rc );
        lsm_access_group_record_free(group);
        group = NULL;
    }
}
END_TEST

/* Retrieve plug-in description/version and verify argument checking. */
START_TEST(test_plugin_info)
{
    char *desc = NULL;
    char *version = NULL;
    int rc = 0;

    G(rc, lsm_plugin_info_get, c, &desc, &version, LSM_CLIENT_FLAG_RSVD);

    if( LSM_ERR_OK == rc ) {
        printf("Desc: (%s), Version: (%s)\n", desc, version);
        /* Caller owns the returned strings. */
        free(desc);
        free(version);
    }

    rc = lsm_plugin_info_get(NULL, &desc, &version, LSM_CLIENT_FLAG_RSVD);
    fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc = %d", rc);

    rc = lsm_plugin_info_get(c, NULL, &version, LSM_CLIENT_FLAG_RSVD);
    fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc = %d", rc);

    rc = lsm_plugin_info_get(c, &desc, NULL, LSM_CLIENT_FLAG_RSVD);
    fail_unless(LSM_ERR_INVALID_ARGUMENT == rc, "rc = %d", rc);
}
END_TEST

/* Enumerate installed plug-ins; each entry must be a non-empty string. */
START_TEST(test_get_available_plugins)
{
    int i = 0;
    int num = 0;
    lsm_string_list *plugins = NULL;
    int rc = 0;

    G(rc, lsm_available_plugins_list, ":", &plugins, 0);

    num = lsm_string_list_size(plugins);
    for( i = 0; i < num; i++) {
        const char *info = lsm_string_list_elem_get(plugins, i);
        fail_unless(strlen(info) > 0);
        printf("%s\n", info);
    }

    G(rc, lsm_string_list_free, plugins);
    plugins = NULL;
}
END_TEST

/* Round-trip every field of an lsm_error record. */
START_TEST(test_error_reporting)
{
    uint8_t d[4] = {0x00, 0x01, 0x02, 0x03};
    char msg[] = "Testing Errors";
    char exception[] = "Exception text";
    char debug_msg[] = "Debug message";
    void *debug_data = NULL;
    uint32_t debug_size = 0;

    lsm_error_ptr e = lsm_error_create(LSM_ERR_LIB_BUG, msg, exception,
                                        debug_msg, d, sizeof(d));

    fail_unless(e != NULL);

    if( e ) {
        fail_unless(LSM_ERR_LIB_BUG == lsm_error_number_get(e));
        fail_unless(strcmp(msg, lsm_error_message_get(e)) == 0);
        fail_unless(strcmp(exception, lsm_error_exception_get(e)) == 0);
        fail_unless(strcmp(debug_msg, lsm_error_debug_get(e)) == 0);
        debug_data = lsm_error_debug_data_get(e, &debug_size);
        fail_unless(debug_data != NULL);
        fail_unless(debug_size == sizeof(d));
fail_unless(memcmp(d, debug_data, debug_size) == 0);
        fail_unless( LSM_ERR_OK == lsm_error_free(e) );
    }
}
END_TEST

/* Set a large capability bitmap and verify get/set round-trips. */
START_TEST(test_capability)
{
    int rc;
    int i;

    /* Capabilities that must read back as supported after set_n below. */
    lsm_capability_type expected_present[] = {
        LSM_CAP_VOLUMES,
        LSM_CAP_VOLUME_CREATE,
        LSM_CAP_VOLUME_RESIZE,
        LSM_CAP_VOLUME_REPLICATE,
        LSM_CAP_VOLUME_REPLICATE_CLONE,
        LSM_CAP_VOLUME_REPLICATE_COPY,
        LSM_CAP_VOLUME_REPLICATE_MIRROR_ASYNC,
        LSM_CAP_VOLUME_REPLICATE_MIRROR_SYNC,
        LSM_CAP_VOLUME_COPY_RANGE_BLOCK_SIZE,
        LSM_CAP_VOLUME_COPY_RANGE,
        LSM_CAP_VOLUME_COPY_RANGE_CLONE,
        LSM_CAP_VOLUME_COPY_RANGE_COPY,
        LSM_CAP_VOLUME_DELETE,
        LSM_CAP_VOLUME_ENABLE,
        LSM_CAP_VOLUME_DISABLE,
        LSM_CAP_VOLUME_MASK,
        LSM_CAP_VOLUME_UNMASK,
        LSM_CAP_ACCESS_GROUPS,
        LSM_CAP_ACCESS_GROUP_CREATE_WWPN,
        LSM_CAP_ACCESS_GROUP_INITIATOR_ADD_WWPN,
        LSM_CAP_ACCESS_GROUP_INITIATOR_DELETE,
        LSM_CAP_VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP,
        LSM_CAP_ACCESS_GROUPS_GRANTED_TO_VOLUME,
        LSM_CAP_VOLUME_CHILD_DEPENDENCY,
        LSM_CAP_VOLUME_CHILD_DEPENDENCY_RM,
        LSM_CAP_FS,
        LSM_CAP_FS_DELETE,
        LSM_CAP_FS_RESIZE,
        LSM_CAP_FS_CREATE,
        LSM_CAP_FS_CLONE,
        LSM_CAP_FILE_CLONE,
        LSM_CAP_FS_SNAPSHOTS,
        LSM_CAP_FS_SNAPSHOT_CREATE,
        LSM_CAP_FS_SNAPSHOT_DELETE,
        LSM_CAP_FS_SNAPSHOT_RESTORE,
        LSM_CAP_FS_SNAPSHOT_RESTORE_SPECIFIC_FILES,
        LSM_CAP_FS_CHILD_DEPENDENCY,
        LSM_CAP_FS_CHILD_DEPENDENCY_RM,
        LSM_CAP_FS_CHILD_DEPENDENCY_RM_SPECIFIC_FILES,
        LSM_CAP_EXPORT_AUTH,
        LSM_CAP_EXPORTS,
        LSM_CAP_EXPORT_FS,
        LSM_CAP_EXPORT_REMOVE};

    /* Currently empty: no capability is expected to be absent. */
    lsm_capability_type expected_absent[] = {
    };

    lsm_storage_capabilities *cap = lsm_capability_record_alloc(NULL);

    fail_unless(cap != NULL);

    if( cap ) {
        /* set_n takes a -1 terminated variadic list of capabilities. */
        G(rc, lsm_capability_set_n, cap, LSM_CAP_SUPPORTED,
            LSM_CAP_VOLUMES,
            LSM_CAP_VOLUME_CREATE,
            LSM_CAP_VOLUME_RESIZE,
            LSM_CAP_VOLUME_REPLICATE,
            LSM_CAP_VOLUME_REPLICATE_CLONE,
            LSM_CAP_VOLUME_REPLICATE_COPY,
            LSM_CAP_VOLUME_REPLICATE_MIRROR_ASYNC,
            LSM_CAP_VOLUME_REPLICATE_MIRROR_SYNC,
            LSM_CAP_VOLUME_COPY_RANGE_BLOCK_SIZE,
            LSM_CAP_VOLUME_COPY_RANGE,
            LSM_CAP_VOLUME_COPY_RANGE_CLONE,
            LSM_CAP_VOLUME_COPY_RANGE_COPY,
            LSM_CAP_VOLUME_DELETE,
            LSM_CAP_VOLUME_ENABLE,
            LSM_CAP_VOLUME_DISABLE,
            LSM_CAP_VOLUME_MASK,
            LSM_CAP_VOLUME_UNMASK,
            LSM_CAP_ACCESS_GROUPS,
            LSM_CAP_ACCESS_GROUP_CREATE_WWPN,
            LSM_CAP_ACCESS_GROUP_INITIATOR_ADD_WWPN,
            LSM_CAP_ACCESS_GROUP_INITIATOR_DELETE,
            LSM_CAP_VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP,
            LSM_CAP_ACCESS_GROUPS_GRANTED_TO_VOLUME,
            LSM_CAP_VOLUME_CHILD_DEPENDENCY,
            LSM_CAP_VOLUME_CHILD_DEPENDENCY_RM,
            LSM_CAP_FS,
            LSM_CAP_FS_DELETE,
            LSM_CAP_FS_RESIZE,
            LSM_CAP_FS_CREATE,
            LSM_CAP_FS_CLONE,
            LSM_CAP_FILE_CLONE,
            LSM_CAP_FS_SNAPSHOTS,
            LSM_CAP_FS_SNAPSHOT_CREATE,
            LSM_CAP_FS_SNAPSHOT_DELETE,
            LSM_CAP_FS_SNAPSHOT_RESTORE,
            LSM_CAP_FS_SNAPSHOT_RESTORE_SPECIFIC_FILES,
            LSM_CAP_FS_CHILD_DEPENDENCY,
            LSM_CAP_FS_CHILD_DEPENDENCY_RM,
            LSM_CAP_FS_CHILD_DEPENDENCY_RM_SPECIFIC_FILES,
            LSM_CAP_EXPORT_AUTH,
            LSM_CAP_EXPORTS,
            LSM_CAP_EXPORT_FS,
            LSM_CAP_EXPORT_REMOVE,
            -1
            );

        G(rc, lsm_capability_set, cap, LSM_CAP_EXPORTS, LSM_CAP_SUPPORTED);

        for( i = 0;
            i < sizeof(expected_present)/sizeof(expected_present[0]);
            ++i) {
            fail_unless( lsm_capability_get(cap, expected_present[i]) ==
                            LSM_CAP_SUPPORTED);
        }

        for( i = 0;
            i < sizeof(expected_absent)/sizeof(expected_absent[0]);
            ++i) {
            fail_unless( lsm_capability_get(cap, expected_absent[i]) ==
                            LSM_CAP_UNSUPPORTED);
        }

        G(rc, lsm_capability_record_free, cap);
    }
}
END_TEST

/* Allocate an NFS export record, copy it, and round-trip every field. */
START_TEST(test_nfs_export_funcs)
{
    const char id[] = "export_unique_id";
    const char fs_id[] = "fs_unique_id";
    const char export_path[] = "/mnt/foo";
    const char auth[] = "simple";
    uint64_t anonuid = 1021;
    uint64_t anongid = 1000;
    const char options[] = "vendor_specific_option";
    const char p_data[] = "plug-in private data";
    char rstring[33];
    int rc = 0;

    lsm_string_list *root = lsm_string_list_alloc(0);
    G(rc, lsm_string_list_append, root, "192.168.100.2");
    G(rc, lsm_string_list_append, root, "192.168.100.3");

    lsm_string_list *rw = lsm_string_list_alloc(0);
    G(rc, lsm_string_list_append, rw, "192.168.100.2");
    G(rc, lsm_string_list_append, rw, "192.168.100.3");

    lsm_string_list *rand = lsm_string_list_alloc(0);

    lsm_string_list *ro = lsm_string_list_alloc(0);
    G(rc, lsm_string_list_append, ro, "*");

    lsm_nfs_export *export = lsm_nfs_export_record_alloc(id, fs_id,
                                                            export_path, auth,
                                                            root, rw, ro,
                                                            anonuid, anongid,
                                                            options, p_data);

    /* A copy must compare equal on every field. */
    lsm_nfs_export *copy = lsm_nfs_export_record_copy(export);
    fail_unless( strcmp(lsm_nfs_export_id_get(copy), id) == 0 );
    fail_unless( strcmp(lsm_nfs_export_fs_id_get(copy), fs_id) == 0);
    fail_unless( strcmp(lsm_nfs_export_export_path_get(copy),
                    export_path) == 0);
    fail_unless( strcmp(lsm_nfs_export_auth_type_get(copy), auth) == 0);
    fail_unless( strcmp(lsm_nfs_export_options_get(copy), options) == 0);
    fail_unless( lsm_nfs_export_anon_uid_get(copy) == anonuid);
    fail_unless( lsm_nfs_export_anon_gid_get(copy) == anongid);
    fail_unless(compare_string_lists(lsm_nfs_export_root_get(export),
                    lsm_nfs_export_root_get(copy)) == 0);
    fail_unless(compare_string_lists(lsm_nfs_export_read_write_get(export),
                    lsm_nfs_export_read_write_get(copy)) == 0);
    fail_unless(compare_string_lists(lsm_nfs_export_read_only_get(export),
                    lsm_nfs_export_read_only_get(copy)) == 0);

    G(rc, lsm_nfs_export_record_free, copy);

    /* Each setter must be reflected by the matching getter. */
    generate_random(rstring, sizeof(rstring));
    G(rc, lsm_nfs_export_id_set, export, rstring);
    fail_unless( strcmp(lsm_nfs_export_id_get(export), rstring) == 0 );

    generate_random(rstring, sizeof(rstring));
    G(rc, lsm_nfs_export_fs_id_set, export, rstring);
    fail_unless( strcmp(lsm_nfs_export_fs_id_get(export), rstring) == 0 );

    generate_random(rstring, sizeof(rstring));
    G(rc, lsm_nfs_export_export_path_set, export, rstring);
    fail_unless( strcmp(lsm_nfs_export_export_path_get(export),
                    rstring) == 0 );

    generate_random(rstring, sizeof(rstring));
    G(rc, lsm_nfs_export_auth_type_set, export, rstring);
    fail_unless( strcmp(lsm_nfs_export_auth_type_get(export),
                    rstring) == 0 );

    generate_random(rstring, sizeof(rstring));
    G(rc, lsm_nfs_export_options_set, export, rstring);
    fail_unless( strcmp(lsm_nfs_export_options_get(export), rstring) == 0 );

    anonuid = anonuid + 700;
    G(rc,
lsm_nfs_export_anon_uid_set, export, anonuid); anongid = anongid + 400; G(rc, lsm_nfs_export_anon_gid_set, export, anongid); fail_unless(lsm_nfs_export_anon_uid_get(export) == anonuid); fail_unless(lsm_nfs_export_anon_gid_get(export) == anongid); generate_random(rstring, sizeof(rstring)); G(rc, lsm_string_list_append, rand, rstring); G(rc, lsm_nfs_export_root_set, export, rand); fail_unless(compare_string_lists(lsm_nfs_export_root_get(export), rand) == 0); generate_random(rstring, sizeof(rstring)); G(rc, lsm_string_list_append, rand, rstring); G(rc, lsm_nfs_export_read_write_set, export, rand); fail_unless(compare_string_lists(lsm_nfs_export_read_write_get(export), rand) == 0); generate_random(rstring, sizeof(rstring)); G(rc, lsm_string_list_append, rand, rstring); G(rc, lsm_nfs_export_read_only_set, export, rand); fail_unless(compare_string_lists(lsm_nfs_export_read_only_get(export), rand) == 0); G(rc, lsm_nfs_export_record_free, export); export = NULL; G(rc, lsm_string_list_free, root); root = NULL; G(rc, lsm_string_list_free, rw); rw = NULL; G(rc, lsm_string_list_free, ro); ro = NULL; G(rc, lsm_string_list_free, rand); rand = NULL; } END_TEST START_TEST(test_uri_parse) { const char uri_g[] = "sim://user@host:123/path/?namespace=root/uber"; const char uri_no_path[] = "smis://user@host?namespace=root/emc"; char *scheme = NULL; char *user = NULL; char *server = NULL; char *path = NULL; int port = 0; lsm_hash *qp = NULL; int rc = 0; G(rc, lsm_uri_parse, uri_g, &scheme, &user, &server, &port, &path, &qp); if( LSM_ERR_OK == rc ) { fail_unless(strcmp(scheme, "sim") == 0, "%s", scheme); fail_unless(strcmp(user, "user") == 0, "%s", user); fail_unless(strcmp(server, "host") == 0, "%s", server); fail_unless(strcmp(path, "/path/") == 0, "%s", path); fail_unless(port == 123, "%d", port); fail_unless(qp != NULL); if( qp ) { fail_unless(strcmp("root/uber", lsm_hash_string_get(qp, "namespace")) == 0, "%s", lsm_hash_string_get(qp, "namespace")); } free(scheme); scheme = NULL; 
free(user); user = NULL; free(server); server = NULL; free(path); path = NULL; G(rc, lsm_hash_free, qp); qp = NULL; } port = 0; G(rc, lsm_uri_parse, uri_no_path, &scheme, &user, &server, &port, &path, &qp); if( LSM_ERR_OK == rc ) { fail_unless(strcmp(scheme, "smis") == 0, "%s", scheme); fail_unless(strcmp(user, "user") == 0, "%s", user); fail_unless(strcmp(server, "host") == 0, "%s", server); fail_unless(path == NULL, "%s", path); fail_unless(port == 0, "%d", port); fail_unless(qp != NULL); if( qp ) { fail_unless(strcmp("root/emc", lsm_hash_string_get(qp, "namespace")) == 0, "%s", lsm_hash_string_get(qp, "namespace")); } free(scheme); scheme = NULL; free(user); user = NULL; free(server); server = NULL; G(rc, lsm_hash_free, qp); qp = NULL; } } END_TEST START_TEST(test_search_pools) { int rc; lsm_pool **pools = NULL; uint32_t poolCount = 0; G(rc, lsm_pool_list, c, NULL, NULL, &pools, &poolCount, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_OK == rc && poolCount ) { lsm_pool **search_pools = NULL; uint32_t search_count = 0; G(rc, lsm_pool_list, c, "id", lsm_pool_id_get(pools[0]), &search_pools, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == 1, "Expecting 1 pool, got %d", search_count); G(rc, lsm_pool_record_array_free, search_pools, search_count); /* Search for non-existent pool*/ search_pools = NULL; search_count = 0; G(rc, lsm_pool_list, c, "id", "non-existent-id", &search_pools, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == 0, "Expecting no pools! 
%d", search_count); /* Search which results in all pools */ G(rc, lsm_pool_list, c, "system_id", lsm_pool_system_id_get(pools[0]), &search_pools, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == poolCount, "Expecting %d pools, got %d", poolCount, search_count); G(rc, lsm_pool_record_array_free, search_pools, search_count); search_pools = NULL; search_count = 0; G(rc, lsm_pool_record_array_free, pools, poolCount); pools = NULL; poolCount = 0; } } END_TEST START_TEST(test_search_volumes) { int rc; lsm_volume **volumes = NULL; uint32_t volume_count = 0; lsm_pool *pool = get_test_pool(c); // Make some volumes to we can actually filter create_volumes(c, pool, 10); G(rc, lsm_volume_list, c, NULL, NULL, &volumes, &volume_count, LSM_CLIENT_FLAG_RSVD); fail_unless(volume_count > 0, "We are expecting some volumes!"); if( LSM_ERR_OK == rc && volume_count ) { lsm_volume **search_volume = NULL; uint32_t search_count = 0; G(rc, lsm_volume_list, c, "id", lsm_volume_id_get(volumes[0]), &search_volume, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == 1, "Expecting 1 pool, got %d", search_count); G(rc, lsm_volume_record_array_free, search_volume, search_count); search_volume = NULL; search_count = 0; /* Search for non-existent */ G(rc, lsm_volume_list, c, "id", "non-existent-id", &search_volume, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == 0, "Expecting no volumes! 
%d", search_count); /* Search which results in all volumes */ G(rc, lsm_volume_list, c, "system_id", lsm_volume_system_id_get(volumes[0]), &search_volume, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == volume_count, "Expecting %d volumes, got %d", volume_count, search_count); G(rc, lsm_volume_record_array_free, search_volume, search_count); search_volume = NULL; search_count = 0; G(rc, lsm_volume_record_array_free, volumes, volume_count); volumes = NULL; volume_count = 0; } G(rc, lsm_pool_record_free, pool); pool = NULL; } END_TEST START_TEST(test_search_disks) { int rc; lsm_disk **disks = NULL; uint32_t disk_count = 0; lsm_pool *pool = get_test_pool(c); G(rc, lsm_disk_list, c, NULL, NULL, &disks, &disk_count, LSM_CLIENT_FLAG_RSVD); fail_unless(disk_count > 0, "We are expecting some disks!"); if( LSM_ERR_OK == rc && disk_count ) { lsm_disk **search_disks = NULL; uint32_t search_count = 0; G(rc, lsm_disk_list, c, "id", lsm_disk_id_get(disks[0]), &search_disks, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == 1, "Expecting 1 disk, got %d", search_count); G(rc, lsm_disk_record_array_free, search_disks, search_count); search_disks = NULL; search_count = 0; /* Search for non-existent */ G(rc, lsm_disk_list, c, "id", "non-existent-id", &search_disks, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == 0, "Expecting no disks! 
%d", search_count); /* Search which results in all disks */ G(rc, lsm_disk_list, c, "system_id", lsm_disk_system_id_get(disks[0]), &search_disks, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == disk_count, "Expecting %d disks, got %d", disk_count, search_count); G(rc, lsm_disk_record_array_free, search_disks, search_count); G(rc, lsm_disk_record_array_free, disks, disk_count); disks = NULL; disk_count = 0; } lsm_pool_record_free(pool); } END_TEST START_TEST(test_search_access_groups) { int rc; lsm_access_group **ag = NULL; uint32_t count = 0; int i = 0; lsm_access_group *group = NULL; lsm_pool *pool = get_test_pool(c); lsm_system *system = get_system(c); fail_unless(system != NULL, "Missing system!"); for( i = 0; i < 2; ++i ) { char ag_name[64]; snprintf(ag_name, sizeof(ag_name), "test_access_group_%d", i); G(rc, lsm_access_group_create, c, ag_name, ISCSI_HOST[i], LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN, system, &group, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_OK == rc ) { G(rc, lsm_access_group_record_free, group); group = NULL; } } G(rc, lsm_system_record_free, system); system = NULL; G(rc, lsm_access_group_list, c, NULL, NULL, &ag, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(count > 0, "We are expecting some access_groups!"); if( LSM_ERR_OK == rc && count ) { lsm_access_group **search_ag = NULL; uint32_t search_count = 0; G(rc, lsm_access_group_list, c, "id", lsm_access_group_id_get(ag[0]), &search_ag, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == 1, "Expecting 1 access group, got %d", search_count); G(rc, lsm_access_group_record_array_free, search_ag, search_count); /* Search for non-existent */ search_ag = NULL; search_count = 0; G(rc, lsm_access_group_list, c, "id", "non-existent-id", &search_ag, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == 0, "Expecting no access groups! 
%d", search_count); /* Search which results in all disks */ G(rc, lsm_access_group_list, c, "system_id", lsm_access_group_system_id_get(ag[0]), &search_ag, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == count, "Expecting %d access groups, got %d", count, search_count); G(rc, lsm_access_group_record_array_free, search_ag, search_count); search_ag = NULL; search_count = 0; G(rc, lsm_access_group_record_array_free, ag, count); ag = NULL; count = 0; } G(rc, lsm_pool_record_free, pool); pool = NULL; } END_TEST START_TEST(test_search_fs) { int rc; lsm_fs **fsl = NULL; lsm_fs *fs = NULL; uint32_t count = 0; int i = 0; char *job = NULL; lsm_pool *pool = get_test_pool(c); for( i = 0; i < 2; ++i ) { char fs_name[64]; snprintf(fs_name, sizeof(fs_name), "test_fs_%d", i); rc = lsm_fs_create(c, pool, fs_name, 50000000, &fs, &job, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_JOB_STARTED == rc ) { fail_unless(NULL == fs); fs = wait_for_job_fs(c, &job); } else { fail_unless(LSM_ERR_OK == rc); } G(rc, lsm_fs_record_free, fs); fs = NULL; } G(rc, lsm_fs_list, c, NULL, NULL, &fsl, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(count > 0, "We are expecting some file systems!"); if( LSM_ERR_OK == rc && count ) { lsm_fs **search_fs = NULL; uint32_t search_count = 0; G(rc, lsm_fs_list, c, "id", lsm_fs_id_get(fsl[0]), &search_fs, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == 1, "Expecting 1 fs, got %d", search_count); G(rc, lsm_fs_record_array_free, search_fs, search_count); search_fs = NULL; search_count = 0; /* Search for non-existent */ G(rc, lsm_fs_list, c, "id", "non-existent-id", &search_fs, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == 0, "Expecting no fs! 
%d", search_count); /* Search which results in all disks */ G(rc, lsm_fs_list, c, "system_id", lsm_fs_system_id_get(fsl[0]), &search_fs, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == count, "Expecting %d fs, got %d", count, search_count); G(rc, lsm_fs_record_array_free, search_fs, search_count); G(rc, lsm_fs_record_array_free, fsl, count); fsl = NULL; count = 0; } lsm_pool_record_free(pool); } END_TEST static void verify_string(const char *method, const char *value) { fail_unless(method != NULL, "%s rc is NULL", method); if( value ) { fail_unless( strlen(value) > 0, "%s string len = 0", method); } } START_TEST(test_target_ports) { lsm_target_port **tp = NULL; uint32_t count = 0; uint32_t i = 0; int rc = 0; G(rc, lsm_target_port_list, c, NULL, NULL, &tp, &count, LSM_CLIENT_FLAG_RSVD); if( LSM_ERR_OK == rc ) { for( i = 0; i < count; ++i ) { verify_string("lsm_target_port_id_get", lsm_target_port_id_get(tp[i])); int pt = (int)lsm_target_port_type_get(tp[i]); fail_unless(pt >= 0 && pt <= 4, "%d", pt); verify_string("lsm_target_port_service_address_get", lsm_target_port_service_address_get(tp[i])); verify_string("lsm_target_port_network_address_get", lsm_target_port_network_address_get(tp[i])); verify_string("lsm_target_port_physical_address_get", lsm_target_port_physical_address_get(tp[i])); verify_string("lsm_target_port_physical_name_get", lsm_target_port_physical_name_get(tp[i])); verify_string("lsm_target_port_system_id_get", lsm_target_port_system_id_get(tp[i])); } { lsm_target_port **search = NULL; uint32_t search_count = 0; G(rc, lsm_target_port_list, c, "id", "does_not_exist", &search, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == 0, "%d", search_count); G(rc, lsm_target_port_list, c, "system_id", "sim-01", &search, &search_count, LSM_CLIENT_FLAG_RSVD); fail_unless(search_count == 5, "%d", search_count); if( search_count ) { G(rc, lsm_target_port_record_array_free, search, search_count); } } G(rc, 
lsm_target_port_record_array_free, tp, count); } } END_TEST START_TEST(test_initiator_id_verification) { int rc = 0; lsm_access_group *group = NULL; lsm_access_group *updated_group = NULL; lsm_access_group **groups = NULL; uint32_t count = 0; lsm_system *system = get_system(c); G(rc, lsm_access_group_list, c, NULL, NULL, &groups, &count, LSM_CLIENT_FLAG_RSVD); fail_unless(count == 0, "Expect 0 access groups, got %"PRIu32, count); fail_unless(groups == NULL); /* Test valid iqns first, then invalid */ G(rc, lsm_access_group_create, c, "test_ag_iscsi", "iqn.1994-05.com.domain.sub:whatever-the.users_wants", LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN, system, &group, LSM_CLIENT_FLAG_RSVD); G(rc, lsm_access_group_initiator_add, c, group, "iqn.2001-04.com.example:storage:diskarrays-sn-a8675309", LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN, &updated_group, LSM_CLIENT_FLAG_RSVD); G(rc, lsm_access_group_record_free, group); group = updated_group; updated_group = NULL; G(rc, lsm_access_group_initiator_add, c, group, "iqn.2001-04.com.example", LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN, &updated_group, LSM_CLIENT_FLAG_RSVD); G(rc, lsm_access_group_record_free, group); group = updated_group; updated_group = NULL; G(rc, lsm_access_group_initiator_add, c, group, "iqn.2001-04.com.example:storage.tape1.sys1.xyz", LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN, &updated_group, LSM_CLIENT_FLAG_RSVD); G(rc, lsm_access_group_record_free, group); group = updated_group; updated_group = NULL; G(rc, lsm_access_group_initiator_add, c, group, "iqn.2001-04.com.example:storage.disk2.sys1.xyz", LSM_ACCESS_GROUP_INIT_TYPE_ISCSI_IQN, &updated_group, LSM_CLIENT_FLAG_RSVD); G(rc, lsm_access_group_record_free, group); group = updated_group; updated_group = NULL; G(rc, lsm_access_group_initiator_add, c, group, "0x0011223344556677", LSM_ACCESS_GROUP_INIT_TYPE_WWPN, &updated_group, LSM_CLIENT_FLAG_RSVD); G(rc, lsm_access_group_record_free, group); group = updated_group; updated_group = NULL; G(rc, lsm_access_group_initiator_add, 
c, group, "00:11:22:33:44:55:66:78", LSM_ACCESS_GROUP_INIT_TYPE_WWPN, &updated_group, LSM_CLIENT_FLAG_RSVD); G(rc, lsm_access_group_record_free, group); group = updated_group; updated_group = NULL; G(rc, lsm_access_group_initiator_add, c, group, "00-11-22-33-44-55-66-79", LSM_ACCESS_GROUP_INIT_TYPE_WWPN, &updated_group, LSM_CLIENT_FLAG_RSVD); G(rc, lsm_access_group_record_free, group); group = updated_group; updated_group = NULL; G(rc, lsm_access_group_initiator_add, c, group, "0x00-11-22-33-44-55-66-80", LSM_ACCESS_GROUP_INIT_TYPE_WWPN, &updated_group, LSM_CLIENT_FLAG_RSVD); G(rc, lsm_access_group_record_free, group); group = updated_group; updated_group = NULL; /* Test invalid */ rc = lsm_access_group_initiator_add(c, group, "0x:0011223344556677", LSM_ACCESS_GROUP_INIT_TYPE_WWPN, &updated_group, LSM_CLIENT_FLAG_RSVD); fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "Expected initiator id with invalid form to fail! %d", rc); /* Test invalid iqn */ rc = lsm_access_group_initiator_add(c, group, "0011223344556677:", LSM_ACCESS_GROUP_INIT_TYPE_WWPN, &updated_group, LSM_CLIENT_FLAG_RSVD); fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "Expected initiator id with invalid form to fail! %d", rc); /* Test invalid iqn */ rc = lsm_access_group_initiator_add(c, group, "001122334455667788", LSM_ACCESS_GROUP_INIT_TYPE_WWPN, &updated_group, LSM_CLIENT_FLAG_RSVD); fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "Expected initiator id with invalid form to fail! %d", rc); /* Test invalid iqn */ rc = lsm_access_group_initiator_add(c, group, "0x001122334455", LSM_ACCESS_GROUP_INIT_TYPE_WWPN, &updated_group, LSM_CLIENT_FLAG_RSVD); fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "Expected initiator id with invalid form to fail! %d", rc); /* Test invalid iqn */ rc = lsm_access_group_initiator_add(c, group, "0x00+11:22:33:44:55:66:77", LSM_ACCESS_GROUP_INIT_TYPE_WWPN, &updated_group, LSM_CLIENT_FLAG_RSVD); fail_unless(rc == LSM_ERR_INVALID_ARGUMENT, "Expected initiator id with invalid form to fail! 
%d", rc); /* Delete group */ G(rc, lsm_access_group_delete, c, group, LSM_CLIENT_FLAG_RSVD); G(rc, lsm_access_group_record_free, group); group = NULL; G(rc, lsm_system_record_free, system); system = NULL; } END_TEST START_TEST(test_volume_vpd_check) { int rc; F(rc, lsm_volume_vpd83_verify, NULL ); F(rc, lsm_volume_vpd83_verify, "012345678901234567890123456789AB"); F(rc, lsm_volume_vpd83_verify, "012345678901234567890123456789ax"); F(rc, lsm_volume_vpd83_verify, "012345678901234567890123456789ag"); F(rc, lsm_volume_vpd83_verify, "1234567890123456789012345abcdef"); F(rc, lsm_volume_vpd83_verify, "01234567890123456789012345abcdefa"); F(rc, lsm_volume_vpd83_verify, "01234567890123456789012345abcdef"); F(rc, lsm_volume_vpd83_verify, "55cd2e404beec32e0"); F(rc, lsm_volume_vpd83_verify, "55cd2e404beec32ex"); F(rc, lsm_volume_vpd83_verify, "55cd2e404beec32A"); F(rc, lsm_volume_vpd83_verify, "35cd2e404beec32A"); G(rc, lsm_volume_vpd83_verify, "61234567890123456789012345abcdef"); G(rc, lsm_volume_vpd83_verify, "55cd2e404beec32e"); G(rc, lsm_volume_vpd83_verify, "35cd2e404beec32e"); G(rc, lsm_volume_vpd83_verify, "25cd2e404beec32e"); } END_TEST START_TEST(test_volume_raid_info) { lsm_volume *volume = NULL; char *job = NULL; lsm_pool *pool = get_test_pool(c); int rc = lsm_volume_create( c, pool, "volume_raid_info_test", 20000000, LSM_VOLUME_PROVISION_DEFAULT, &volume, &job, LSM_CLIENT_FLAG_RSVD); fail_unless( rc == LSM_ERR_OK || rc == LSM_ERR_JOB_STARTED, "lsmVolumeCreate %d (%s)", rc, error(lsm_error_last_get(c))); if( LSM_ERR_JOB_STARTED == rc ) { volume = wait_for_job_vol(c, &job); } lsm_volume_raid_type raid_type; uint32_t strip_size, disk_count, min_io_size, opt_io_size; G( rc, lsm_volume_raid_info, c, volume, &raid_type, &strip_size, &disk_count, &min_io_size, &opt_io_size, LSM_CLIENT_FLAG_RSVD); G(rc, lsm_volume_record_free, volume); G(rc, lsm_pool_record_free, pool); volume = NULL; } END_TEST START_TEST(test_pool_member_info) { int rc; lsm_pool **pools = NULL; uint32_t 
poolCount = 0; G(rc, lsm_pool_list, c, NULL, NULL, &pools, &poolCount, LSM_CLIENT_FLAG_RSVD); lsm_volume_raid_type raid_type; lsm_pool_member_type member_type; lsm_string_list *member_ids = NULL; int i; uint32_t y; for (i = 0; i < poolCount; i++) { G( rc, lsm_pool_member_info, c, pools[i], &raid_type, &member_type, &member_ids, LSM_CLIENT_FLAG_RSVD); for(y = 0; y < lsm_string_list_size(member_ids); y++){ // Simulator user reading the member id. const char *cur_member_id = lsm_string_list_elem_get( member_ids, y); fail_unless( strlen(cur_member_id) ); } lsm_string_list_free(member_ids); } G(rc, lsm_pool_record_array_free, pools, poolCount); } END_TEST START_TEST(test_volume_raid_create_cap_get) { if (which_plugin == 1){ // silently skip on simc which does not support this method yet. return; } int rc; lsm_system **sys = NULL; uint32_t sys_count = 0; G(rc, lsm_system_list, c, &sys, &sys_count, LSM_CLIENT_FLAG_RSVD); fail_unless( sys_count >= 1, "count = %d", sys_count); if( sys_count > 0 ) { uint32_t *supported_raid_types = NULL; uint32_t supported_raid_type_count = 0; uint32_t *supported_strip_sizes = NULL; uint32_t supported_strip_size_count = 0; G( rc, lsm_volume_raid_create_cap_get, c, sys[0], &supported_raid_types, &supported_raid_type_count, &supported_strip_sizes, &supported_strip_size_count, 0); free(supported_raid_types); free(supported_strip_sizes); } G(rc, lsm_system_record_array_free, sys, sys_count); } END_TEST START_TEST(test_volume_raid_create) { if (which_plugin == 1){ // silently skip on simc which does not support this method yet. return; } int rc; lsm_disk **disks = NULL; uint32_t disk_count = 0; G(rc, lsm_disk_list, c, NULL, NULL, &disks, &disk_count, 0); // Try to create two disks RAID 1. 
uint32_t free_disk_count = 0; lsm_disk *free_disks[2]; int i; for (i = 0; i< disk_count; i++){ if (lsm_disk_status_get(disks[i]) & LSM_DISK_STATUS_FREE){ free_disks[free_disk_count++] = disks[i]; if (free_disk_count == 2){ break; } } } fail_unless(free_disk_count == 2, "Failed to find two free disks"); lsm_volume *new_volume = NULL; G(rc, lsm_volume_raid_create, c, "test_volume_raid_create", LSM_VOLUME_RAID_TYPE_RAID1, free_disks, free_disk_count, LSM_VOLUME_VCR_STRIP_SIZE_DEFAULT, &new_volume, LSM_CLIENT_FLAG_RSVD); char *job_del = NULL; int del_rc = lsm_volume_delete( c, new_volume, &job_del, LSM_CLIENT_FLAG_RSVD); fail_unless( del_rc == LSM_ERR_OK || del_rc == LSM_ERR_JOB_STARTED, "lsm_volume_delete %d (%s)", rc, error(lsm_error_last_get(c))); if( LSM_ERR_JOB_STARTED == del_rc ) { wait_for_job_vol(c, &job_del); } G(rc, lsm_disk_record_array_free, disks, disk_count); // The new pool should be automatically be deleted when volume got // deleted. lsm_pool **pools = NULL; uint32_t count = 0; G( rc, lsm_pool_list, c, "id", lsm_volume_pool_id_get(new_volume), &pools, &count, LSM_CLIENT_FLAG_RSVD); fail_unless( count == 0, "New HW RAID pool still exists, it should be deleted along with " "lsm_volume_delete()"); lsm_pool_record_array_free(pools, count); G(rc, lsm_volume_record_free, new_volume); } END_TEST Suite * lsm_suite(void) { Suite *s = suite_create("libStorageMgmt"); TCase *basic = tcase_create("Basic"); tcase_add_checked_fixture (basic, setup, teardown); tcase_add_test(basic, test_volume_vpd_check); tcase_add_test(basic, test_initiator_id_verification); tcase_add_test(basic, test_target_ports); tcase_add_test(basic, test_search_fs); tcase_add_test(basic, test_search_access_groups); tcase_add_test(basic, test_search_disks); tcase_add_test(basic, test_search_volumes); tcase_add_test(basic, test_search_pools); tcase_add_test(basic, test_uri_parse); tcase_add_test(basic, test_error_reporting); tcase_add_test(basic, test_capability); tcase_add_test(basic, 
test_nfs_export_funcs); tcase_add_test(basic, test_disks); tcase_add_test(basic, test_plugin_info); tcase_add_test(basic, test_get_available_plugins); tcase_add_test(basic, test_volume_methods); tcase_add_test(basic, test_iscsi_auth_in); tcase_add_test(basic, test_capabilities); tcase_add_test(basic, test_smoke_test); tcase_add_test(basic, test_access_groups); tcase_add_test(basic, test_systems); tcase_add_test(basic, test_access_groups_grant_revoke); tcase_add_test(basic, test_fs); tcase_add_test(basic, test_ss); tcase_add_test(basic, test_nfs_exports); tcase_add_test(basic, test_invalid_input); tcase_add_test(basic, test_volume_raid_info); tcase_add_test(basic, test_pool_member_info); tcase_add_test(basic, test_volume_raid_create_cap_get); tcase_add_test(basic, test_volume_raid_create); suite_add_tcase(s, basic); return s; } int main(int argc, char** argv) { int number_failed; Suite *s = lsm_suite(); SRunner *sr = srunner_create(s); /* * Don't run python plug-in tests if we are looking for * memory leaks. */ if( !getenv("LSM_VALGRIND") ) { srunner_run_all(sr, CK_NORMAL); } /* Switch plug-in backend to test C language compat. */ which_plugin = 1; srunner_run_all(sr, CK_NORMAL); number_failed = srunner_ntests_failed(sr); srunner_free(sr); return(number_failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE; } libstoragemgmt-1.2.3/test/Makefile.in0000664000175000017500000010365412542455445014527 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ check_PROGRAMS = tester$(EXEEXT) subdir = test DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(top_srcdir)/build-aux/depcomp \ 
$(top_srcdir)/build-aux/test-driver ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am_tester_OBJECTS = tester-tester.$(OBJEXT) tester_OBJECTS = $(am_tester_OBJECTS) am__DEPENDENCIES_1 = tester_DEPENDENCIES = ../c_binding/libstoragemgmt.la \ $(am__DEPENDENCIES_1) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = tester_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(tester_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/build-aux/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o 
$@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(tester_SOURCES) DIST_SOURCES = $(tester_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags am__tty_colors_dummy = \ mgn= red= grn= lgn= blu= brg= std=; \ am__color_tests=no am__tty_colors = { \ $(am__tty_colors_dummy); \ if test "X$(AM_COLOR_TESTS)" = Xno; then \ am__color_tests=no; \ elif test "X$(AM_COLOR_TESTS)" = Xalways; then \ am__color_tests=yes; \ elif test "X$$TERM" != Xdumb && { test -t 1; } 2>/dev/null; then \ am__color_tests=yes; \ fi; \ if test $$am__color_tests = yes; then \ red=''; \ grn=''; \ lgn=''; \ blu=''; \ mgn=''; \ brg=''; \ std=''; \ fi; \ } am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" 
am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__recheck_rx = ^[ ]*:recheck:[ ]* am__global_test_result_rx = ^[ ]*:global-test-result:[ ]* am__copy_in_global_log_rx = ^[ ]*:copy-in-global-log:[ ]* # A command that, given a newline-separated list of test names on the # standard input, print the name of the tests that are to be re-run # upon "make recheck". am__list_recheck_tests = $(AWK) '{ \ recheck = 1; \ while ((rc = (getline line < ($$0 ".trs"))) != 0) \ { \ if (rc < 0) \ { \ if ((getline line2 < ($$0 ".log")) < 0) \ recheck = 0; \ break; \ } \ else if (line ~ /$(am__recheck_rx)[nN][Oo]/) \ { \ recheck = 0; \ break; \ } \ else if (line ~ /$(am__recheck_rx)[yY][eE][sS]/) \ { \ break; \ } \ }; \ if (recheck) \ print $$0; \ close ($$0 ".trs"); \ close ($$0 ".log"); \ }' # A command that, given a newline-separated list of test names on the # standard input, create the global log from their .trs and .log files. 
am__create_global_log = $(AWK) ' \ function fatal(msg) \ { \ print "fatal: making $@: " msg | "cat >&2"; \ exit 1; \ } \ function rst_section(header) \ { \ print header; \ len = length(header); \ for (i = 1; i <= len; i = i + 1) \ printf "="; \ printf "\n\n"; \ } \ { \ copy_in_global_log = 1; \ global_test_result = "RUN"; \ while ((rc = (getline line < ($$0 ".trs"))) != 0) \ { \ if (rc < 0) \ fatal("failed to read from " $$0 ".trs"); \ if (line ~ /$(am__global_test_result_rx)/) \ { \ sub("$(am__global_test_result_rx)", "", line); \ sub("[ ]*$$", "", line); \ global_test_result = line; \ } \ else if (line ~ /$(am__copy_in_global_log_rx)[nN][oO]/) \ copy_in_global_log = 0; \ }; \ if (copy_in_global_log) \ { \ rst_section(global_test_result ": " $$0); \ while ((rc = (getline line < ($$0 ".log"))) != 0) \ { \ if (rc < 0) \ fatal("failed to read from " $$0 ".log"); \ print line; \ }; \ printf "\n"; \ }; \ close ($$0 ".trs"); \ close ($$0 ".log"); \ }' # Restructured Text title. am__rst_title = { sed 's/.*/ & /;h;s/./=/g;p;x;s/ *$$//;p;g' && echo; } # Solaris 10 'make', and several other traditional 'make' implementations, # pass "-e" to $(SHELL), and POSIX 2008 even requires this. Work around it # by disabling -e (using the XSI extension "set +e") if it's set. am__sh_e_setup = case $$- in *e*) set +e;; esac # Default flags passed to test drivers. am__common_driver_flags = \ --color-tests "$$am__color_tests" \ --enable-hard-errors "$$am__enable_hard_errors" \ --expect-failure "$$am__expect_failure" # To be inserted before the command running the test. Creates the # directory for the log if needed. Stores in $dir the directory # containing $f, in $tst the test, in $log the log. Executes the # developer- defined test setup AM_TESTS_ENVIRONMENT (if any), and # passes TESTS_ENVIRONMENT. Set up options for the wrapper that # will run the test scripts (or their associated LOG_COMPILER, if # thy have one). 
am__check_pre = \ $(am__sh_e_setup); \ $(am__vpath_adj_setup) $(am__vpath_adj) \ $(am__tty_colors); \ srcdir=$(srcdir); export srcdir; \ case "$@" in \ */*) am__odir=`echo "./$@" | sed 's|/[^/]*$$||'`;; \ *) am__odir=.;; \ esac; \ test "x$$am__odir" = x"." || test -d "$$am__odir" \ || $(MKDIR_P) "$$am__odir" || exit $$?; \ if test -f "./$$f"; then dir=./; \ elif test -f "$$f"; then dir=; \ else dir="$(srcdir)/"; fi; \ tst=$$dir$$f; log='$@'; \ if test -n '$(DISABLE_HARD_ERRORS)'; then \ am__enable_hard_errors=no; \ else \ am__enable_hard_errors=yes; \ fi; \ case " $(XFAIL_TESTS) " in \ *[\ \ ]$$f[\ \ ]* | *[\ \ ]$$dir$$f[\ \ ]*) \ am__expect_failure=yes;; \ *) \ am__expect_failure=no;; \ esac; \ $(AM_TESTS_ENVIRONMENT) $(TESTS_ENVIRONMENT) # A shell command to get the names of the tests scripts with any registered # extension removed (i.e., equivalently, the names of the test logs, with # the '.log' extension removed). The result is saved in the shell variable # '$bases'. This honors runtime overriding of TESTS and TEST_LOGS. Sadly, # we cannot use something simpler, involving e.g., "$(TEST_LOGS:.log=)", # since that might cause problem with VPATH rewrites for suffix-less tests. # See also 'test-harness-vpath-rewrite.sh' and 'test-trs-basic.sh'. 
am__set_TESTS_bases = \ bases='$(TEST_LOGS)'; \ bases=`for i in $$bases; do echo $$i; done | sed 's/\.log$$//'`; \ bases=`echo $$bases` RECHECK_LOGS = $(TEST_LOGS) AM_RECURSIVE_TARGETS = check recheck TEST_SUITE_LOG = test-suite.log TEST_EXTENSIONS = @EXEEXT@ .test LOG_DRIVER = $(SHELL) $(top_srcdir)/build-aux/test-driver LOG_COMPILE = $(LOG_COMPILER) $(AM_LOG_FLAGS) $(LOG_FLAGS) am__set_b = \ case '$@' in \ */*) \ case '$*' in \ */*) b='$*';; \ *) b=`echo '$@' | sed 's/\.log$$//'`; \ esac;; \ *) \ b='$*';; \ esac am__test_logs1 = $(TESTS:=.log) am__test_logs2 = $(am__test_logs1:@EXEEXT@.log=.log) TEST_LOGS = $(am__test_logs2:.test.log=.log) TEST_LOG_DRIVER = $(SHELL) $(top_srcdir)/build-aux/test-driver TEST_LOG_COMPILE = $(TEST_LOG_COMPILER) $(AM_TEST_LOG_FLAGS) \ $(TEST_LOG_FLAGS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ 
LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ 
host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AM_CPPFLAGS = \ -I$(top_srcdir)/c_binding/include \ -I@srcdir@/c_binding/include \ $(LIBXML_CFLAGS) EXTRA_DIST = cmdtest.py runtests.sh plugin_test.py TESTS = runtests.sh tester_CFLAGS = $(LIBCHECK_CFLAGS) tester_LDADD = ../c_binding/libstoragemgmt.la $(LIBCHECK_LIBS) tester_SOURCES = tester.c all: all-am .SUFFIXES: .SUFFIXES: .c .lo .log .o .obj .test .test$(EXEEXT) .trs $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu test/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu test/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-checkPROGRAMS: @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list tester$(EXEEXT): $(tester_OBJECTS) $(tester_DEPENDENCIES) $(EXTRA_tester_DEPENDENCIES) @rm -f tester$(EXEEXT) $(AM_V_CCLD)$(tester_LINK) $(tester_OBJECTS) $(tester_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tester-tester.Po@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< .c.obj: @am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ 
@am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ @am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< tester-tester.o: tester.c @am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tester_CFLAGS) $(CFLAGS) -MT tester-tester.o -MD -MP -MF $(DEPDIR)/tester-tester.Tpo -c -o tester-tester.o `test -f 'tester.c' || echo '$(srcdir)/'`tester.c @am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/tester-tester.Tpo $(DEPDIR)/tester-tester.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='tester.c' object='tester-tester.o' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tester_CFLAGS) $(CFLAGS) -c -o tester-tester.o `test -f 'tester.c' || echo '$(srcdir)/'`tester.c tester-tester.obj: tester.c @am__fastdepCC_TRUE@ $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tester_CFLAGS) $(CFLAGS) -MT tester-tester.obj -MD -MP -MF $(DEPDIR)/tester-tester.Tpo -c -o tester-tester.obj `if test -f 'tester.c'; then $(CYGPATH_W) 'tester.c'; else $(CYGPATH_W) '$(srcdir)/tester.c'; fi` @am__fastdepCC_TRUE@ 
$(AM_V_at)$(am__mv) $(DEPDIR)/tester-tester.Tpo $(DEPDIR)/tester-tester.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='tester.c' object='tester-tester.obj' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(tester_CFLAGS) $(CFLAGS) -c -o tester-tester.obj `if test -f 'tester.c'; then $(CYGPATH_W) 'tester.c'; else $(CYGPATH_W) '$(srcdir)/tester.c'; fi` mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags # Recover from deleted '.trs' file; this should ensure that # "rm -f foo.log; make foo.trs" re-run 'foo.test', and re-create # both 'foo.log' and 
'foo.trs'. Break the recipe in two subshells # to avoid problems with "make -n". .log.trs: rm -f $< $@ $(MAKE) $(AM_MAKEFLAGS) $< # Leading 'am--fnord' is there to ensure the list of targets does not # expand to empty, as could happen e.g. with make check TESTS=''. am--fnord $(TEST_LOGS) $(TEST_LOGS:.log=.trs): $(am__force_recheck) am--force-recheck: @: $(TEST_SUITE_LOG): $(TEST_LOGS) @$(am__set_TESTS_bases); \ am__f_ok () { test -f "$$1" && test -r "$$1"; }; \ redo_bases=`for i in $$bases; do \ am__f_ok $$i.trs && am__f_ok $$i.log || echo $$i; \ done`; \ if test -n "$$redo_bases"; then \ redo_logs=`for i in $$redo_bases; do echo $$i.log; done`; \ redo_results=`for i in $$redo_bases; do echo $$i.trs; done`; \ if $(am__make_dryrun); then :; else \ rm -f $$redo_logs && rm -f $$redo_results || exit 1; \ fi; \ fi; \ if test -n "$$am__remaking_logs"; then \ echo "fatal: making $(TEST_SUITE_LOG): possible infinite" \ "recursion detected" >&2; \ else \ am__remaking_logs=yes $(MAKE) $(AM_MAKEFLAGS) $$redo_logs; \ fi; \ if $(am__make_dryrun); then :; else \ st=0; \ errmsg="fatal: making $(TEST_SUITE_LOG): failed to create"; \ for i in $$redo_bases; do \ test -f $$i.trs && test -r $$i.trs \ || { echo "$$errmsg $$i.trs" >&2; st=1; }; \ test -f $$i.log && test -r $$i.log \ || { echo "$$errmsg $$i.log" >&2; st=1; }; \ done; \ test $$st -eq 0 || exit 1; \ fi @$(am__sh_e_setup); $(am__tty_colors); $(am__set_TESTS_bases); \ ws='[ ]'; \ results=`for b in $$bases; do echo $$b.trs; done`; \ test -n "$$results" || results=/dev/null; \ all=` grep "^$$ws*:test-result:" $$results | wc -l`; \ pass=` grep "^$$ws*:test-result:$$ws*PASS" $$results | wc -l`; \ fail=` grep "^$$ws*:test-result:$$ws*FAIL" $$results | wc -l`; \ skip=` grep "^$$ws*:test-result:$$ws*SKIP" $$results | wc -l`; \ xfail=`grep "^$$ws*:test-result:$$ws*XFAIL" $$results | wc -l`; \ xpass=`grep "^$$ws*:test-result:$$ws*XPASS" $$results | wc -l`; \ error=`grep "^$$ws*:test-result:$$ws*ERROR" $$results | wc -l`; \ if test 
`expr $$fail + $$xpass + $$error` -eq 0; then \ success=true; \ else \ success=false; \ fi; \ br='==================='; br=$$br$$br$$br$$br; \ result_count () \ { \ if test x"$$1" = x"--maybe-color"; then \ maybe_colorize=yes; \ elif test x"$$1" = x"--no-color"; then \ maybe_colorize=no; \ else \ echo "$@: invalid 'result_count' usage" >&2; exit 4; \ fi; \ shift; \ desc=$$1 count=$$2; \ if test $$maybe_colorize = yes && test $$count -gt 0; then \ color_start=$$3 color_end=$$std; \ else \ color_start= color_end=; \ fi; \ echo "$${color_start}# $$desc $$count$${color_end}"; \ }; \ create_testsuite_report () \ { \ result_count $$1 "TOTAL:" $$all "$$brg"; \ result_count $$1 "PASS: " $$pass "$$grn"; \ result_count $$1 "SKIP: " $$skip "$$blu"; \ result_count $$1 "XFAIL:" $$xfail "$$lgn"; \ result_count $$1 "FAIL: " $$fail "$$red"; \ result_count $$1 "XPASS:" $$xpass "$$red"; \ result_count $$1 "ERROR:" $$error "$$mgn"; \ }; \ { \ echo "$(PACKAGE_STRING): $(subdir)/$(TEST_SUITE_LOG)" | \ $(am__rst_title); \ create_testsuite_report --no-color; \ echo; \ echo ".. 
contents:: :depth: 2"; \ echo; \ for b in $$bases; do echo $$b; done \ | $(am__create_global_log); \ } >$(TEST_SUITE_LOG).tmp || exit 1; \ mv $(TEST_SUITE_LOG).tmp $(TEST_SUITE_LOG); \ if $$success; then \ col="$$grn"; \ else \ col="$$red"; \ test x"$$VERBOSE" = x || cat $(TEST_SUITE_LOG); \ fi; \ echo "$${col}$$br$${std}"; \ echo "$${col}Testsuite summary for $(PACKAGE_STRING)$${std}"; \ echo "$${col}$$br$${std}"; \ create_testsuite_report --maybe-color; \ echo "$$col$$br$$std"; \ if $$success; then :; else \ echo "$${col}See $(subdir)/$(TEST_SUITE_LOG)$${std}"; \ if test -n "$(PACKAGE_BUGREPORT)"; then \ echo "$${col}Please report to $(PACKAGE_BUGREPORT)$${std}"; \ fi; \ echo "$$col$$br$$std"; \ fi; \ $$success || exit 1 check-TESTS: @list='$(RECHECK_LOGS)'; test -z "$$list" || rm -f $$list @list='$(RECHECK_LOGS:.log=.trs)'; test -z "$$list" || rm -f $$list @test -z "$(TEST_SUITE_LOG)" || rm -f $(TEST_SUITE_LOG) @set +e; $(am__set_TESTS_bases); \ log_list=`for i in $$bases; do echo $$i.log; done`; \ trs_list=`for i in $$bases; do echo $$i.trs; done`; \ log_list=`echo $$log_list`; trs_list=`echo $$trs_list`; \ $(MAKE) $(AM_MAKEFLAGS) $(TEST_SUITE_LOG) TEST_LOGS="$$log_list"; \ exit $$?; recheck: all $(check_PROGRAMS) @test -z "$(TEST_SUITE_LOG)" || rm -f $(TEST_SUITE_LOG) @set +e; $(am__set_TESTS_bases); \ bases=`for i in $$bases; do echo $$i; done \ | $(am__list_recheck_tests)` || exit 1; \ log_list=`for i in $$bases; do echo $$i.log; done`; \ log_list=`echo $$log_list`; \ $(MAKE) $(AM_MAKEFLAGS) $(TEST_SUITE_LOG) \ am__force_recheck=am--force-recheck \ TEST_LOGS="$$log_list"; \ exit $$? 
runtests.sh.log: runtests.sh @p='runtests.sh'; \ b='runtests.sh'; \ $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ --log-file $$b.log --trs-file $$b.trs \ $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ "$$tst" $(AM_TESTS_FD_REDIRECT) .test.log: @p='$<'; \ $(am__set_b); \ $(am__check_pre) $(TEST_LOG_DRIVER) --test-name "$$f" \ --log-file $$b.log --trs-file $$b.trs \ $(am__common_driver_flags) $(AM_TEST_LOG_DRIVER_FLAGS) $(TEST_LOG_DRIVER_FLAGS) -- $(TEST_LOG_COMPILE) \ "$$tst" $(AM_TESTS_FD_REDIRECT) @am__EXEEXT_TRUE@.test$(EXEEXT).log: @am__EXEEXT_TRUE@ @p='$<'; \ @am__EXEEXT_TRUE@ $(am__set_b); \ @am__EXEEXT_TRUE@ $(am__check_pre) $(TEST_LOG_DRIVER) --test-name "$$f" \ @am__EXEEXT_TRUE@ --log-file $$b.log --trs-file $$b.trs \ @am__EXEEXT_TRUE@ $(am__common_driver_flags) $(AM_TEST_LOG_DRIVER_FLAGS) $(TEST_LOG_DRIVER_FLAGS) -- $(TEST_LOG_COMPILE) \ @am__EXEEXT_TRUE@ "$$tst" $(AM_TESTS_FD_REDIRECT) distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(MAKE) $(AM_MAKEFLAGS) check-TESTS check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: -test -z "$(TEST_LOGS)" || rm -f $(TEST_LOGS) -test -z "$(TEST_LOGS:.log=.trs)" || rm -f $(TEST_LOGS:.log=.trs) -test -z "$(TEST_SUITE_LOG)" || rm -f $(TEST_SUITE_LOG) clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: check-am install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-TESTS check-am clean \ clean-checkPROGRAMS clean-generic clean-libtool cscopelist-am \ ctags ctags-am distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ recheck tags tags-am uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/test/cmdtest.py0000775000175000017500000005615112537737032014500 00000000000000#!/usr/bin/env python2 # Copyright (C) 2011-2014 Red Hat, Inc. 
# # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; If not, see . # USA. # # Author: tasleson #Description: Query array capabilities and run very basic operational tests. # # Note: This file is GPL copyright and not LGPL because: # 1. It is used to test the library, not provide functionality for it. # 2. It uses a function copied from anaconda library which is GPLv2 or later, # thus this code must be GPL as well. import random import string import sys import hashlib import os from subprocess import Popen, PIPE from optparse import OptionParser (OP_SYS, OP_POOL, OP_VOL, OP_FS, OP_EXPORTS, OP_SS) = \ ('SYSTEMS', 'POOLS', 'VOLUMES', 'FS', 'EXPORTS', 'SNAPSHOTS') (ID, NAME) = (0, 1) (POOL_TOTAL, POOL_FREE, POOL_SYSTEM) = (2, 3, 4) (VOL_VPD, VOL_BS, VOL_BLOCKS, VOL_STATUS, VOL_SIZE) = (2, 3, 4, 5, 6) (INIT_TYPE) = 2 (FS_TOTAL, FS_FREE, FS_POOL_ID) = (2, 3, 4) (SYS_STATUS,) = (2,) iqn = ['iqn.1994-05.com.domain:01.89bd01', 'iqn.1994-05.com.domain:01.89bd02'] cmd = "lsmcli" sep = "," test_pool_name = 'lsm_test_aggr' test_fs_pool_id = '' test_disk_id = 'DISK_ID_00000' CUR_SYS_ID = None code_coverage = bool(os.getenv('LSM_PYTHON_COVERAGE', False)) def random_iqn(): """Logic taken from anaconda library""" s = "iqn.1994-05.com.domain:01." 
m = hashlib.md5() u = os.uname() for i in u: m.update(i) dig = m.hexdigest() for i in range(0, 6): s += dig[random.randrange(0, 32)] return s def rs(l): """ Generate a random string """ return 'lsm_' + ''.join( random.choice(string.ascii_uppercase) for x in range(l)) def call(command, expected_rc=0): """ Call an executable and return a tuple of exitcode, stdout, stderr """ if code_coverage: actual_command = ['coverage', 'run', '-o'] actual_command.extend(command) else: actual_command = command print actual_command, 'EXPECTED Exit [%d]' % expected_rc process = Popen(actual_command, stdout=PIPE, stderr=PIPE) out = process.communicate() if process.returncode != expected_rc: raise RuntimeError("exit code != %s, actual= %s, stdout= %s, " "stderr= %s" % (expected_rc, process.returncode, out[0], out[1])) return process.returncode, out[0], out[1] def parse(out): rc = [] for line in out.split('\n'): elem = line.split(sep) cleaned_elem = [] for e in elem: e = e.strip() cleaned_elem.append(e) if len(cleaned_elem) > 1: rc.append(cleaned_elem) return rc def parse_key_value(out): rc = [] for line in out.split('\n'): elem = line.split(sep) if len(elem) > 1: item = dict() for i in range(0, len(elem), 2): key = elem[i].strip() value = elem[i + 1].strip() item[key] = value rc.append(item) return rc def parse_display(op): rc = [] out = call([cmd, '-t' + sep, 'list', '--type', op])[1] for line in out.split('\n'): elem = line.split(sep) if len(elem) > 1: rc.append(list(d.strip() for d in elem)) return rc def name_to_id(op, name): out = parse_display(op) for i in out: if i[NAME] == name: return i[ID] return None def create_volume(pool): out = call([cmd, '-t' + sep, 'volume-create', '--name', rs(12), '--size', '30M', '--pool', pool, '--provisioning', 'DEFAULT'])[1] r = parse(out) return r[0][ID] def volume_delete(vol_id): call([cmd, '-t' + sep, '-f', 'volume-delete', '--vol', vol_id]) def fs_create(pool_id): out = call([cmd, '-t' + sep, 'fs-create', '--name', rs(12), '--size', '500M', 
'--pool', pool_id])[1] r = parse(out) return r[0][ID] def export_fs(fs_id): out = call([cmd, '-t' + sep, 'fs-export', '--fs', fs_id, '--rw-host', '192.168.0.1', '--root-host', '192.168.0.1', '--script'])[1] r = parse_key_value(out) return r[0]['ID'] def un_export_fs(export_id): call([cmd, 'fs-unexport', '--export', export_id]) def delete_fs(fs_id): call([cmd, '-t' + sep, '-f', 'fs-delete', '--fs', fs_id]) def access_group_create(init_id, system_id): out = call([cmd, '-t' + sep, 'access-group-create', '--name', rs(8), '--init', init_id, '--sys', system_id])[1] r = parse(out) return r[0][ID] def access_group_initiator_add(group, initiator): call([cmd, 'access-group-add', '--ag', group, '--init', initiator]) def access_group_remove_init(group, initiator): call([cmd, 'access-group-remove', '--ag', group, '--init', initiator]) def access_group_delete(group_id): call([cmd, '-t' + sep, 'access-group-delete', '--ag', group_id]) def volume_mask(group, volume_id): call([cmd, 'volume-mask', '--ag', group, '--vol', volume_id]) def volume_unmask(group, volume_id): call([cmd, 'volume-unmask', '--ag', group, '--vol', volume_id]) def volumes_accessible_by_access_group(ag_id): call([cmd, 'list', '--type', 'volumes', '--ag', ag_id]) def access_groups_granted_to_volume(vol_id): call([cmd, 'list', '--type', 'access_groups', '--vol', vol_id]) def resize_vol(vol_id): call([cmd, '-t' + sep, '-f', 'volume-resize', '--vol', vol_id, '--size', '60M']) call([cmd, '-t' + sep, '-f', 'volume-resize', '--vol', vol_id, '--size', '100M']) #Some devices cannot re-size down... 
#call([cmd, '--volume-resize', id, '--size', '30M' , '-t'+sep ]) def resize_fs(fs_id): call([cmd, '-t' + sep, '-f', 'fs-resize', '--fs', fs_id, '--size', '1G']) call([cmd, '-t' + sep, '-f', 'fs-resize', '--fs', fs_id, '--size', '750M']) call([cmd, '-t' + sep, '-f', 'fs-resize', '--fs', fs_id, '--size', '300M']) def map_init(init, volume): call([cmd, '-t' + sep, 'access-grant', '--init', init, '--vol', volume, '--access', 'RW']) def unmap(init, volume): call([cmd, 'access-revoke', '--init', init, '--vol', volume]) def clone_fs(fs_id): # TODO Change to --source_id instead of --source_name ? out = call([cmd, '-t' + sep, 'fs-clone', '--src-fs', fs_id, '--dst-name', 'cloned_' + rs(8)])[1] r = parse(out) return r[0][ID] def fs_child_dependancy(fs_id): call([cmd, 'fs-dependants', '--fs', fs_id]) def fs_child_dependancy_rm(fs_id): call([cmd, 'fs-dependants-rm', '--fs', fs_id]) def clone_file(fs_id): # TODO Make this work outside of the simulator call([cmd, 'file-clone', '--fs', fs_id, '--src', 'foo', '--dst', 'bar']) def create_ss(fs_id): out = call([cmd, '-t' + sep, 'fs-snap-create', '--name', rs(12), '--fs', fs_id])[1] r = parse(out) return r[0][ID] def delete_ss(fs_id, ss_id): call([cmd, '-f', 'fs-snap-delete', '--snap', ss_id, '--fs', fs_id]) def restore_ss(snapshot_id, fs_id): call([cmd, '-f', 'fs-snap-restore', '--snap', snapshot_id, '--fs', fs_id]) def volume_replicate(source_id, vol_type, pool=None): out = call([cmd, '-t' + sep, 'volume-replicate', '--vol', source_id, '--rep-type', vol_type, '--name', 'lun_' + vol_type + '_' + rs(12)])[1] r = parse(out) return r[0][ID] def volume_replicate_range_bs(system_id): """ Returns the replicated range block size. 
""" out = call([cmd, 'volume-replicate-range-block-size', '--sys', system_id])[1] return int(out) def volume_replicate_range(vol_id, dest_vol_id, rep_type, src_start, dest_start, count): out = call( [cmd, '-f', 'volume-replicate-range', '--src-vol', vol_id, '--rep-type', rep_type, '--dst-vol', dest_vol_id, '--src-start', str(src_start), '--dst-start', str(dest_start), '--count', str(count)]) def volume_child_dependency(vol_id): call([cmd, 'volume-dependants', '--vol', vol_id]) def volume_child_dependency_rm(vol_id): call([cmd, 'volume-dependants-rm', '--vol', vol_id]) def get_systems(): out = call([cmd, '-t' + sep, 'list', '--type', 'SYSTEMS'])[1] system_list = parse(out) return system_list def initiator_chap(initiator): call([cmd, 'iscsi-chap', '--init', initiator]) call([cmd, 'iscsi-chap', '--init', initiator, '--in-user', "foo", '--in-pass', "bar"]) call([cmd, 'iscsi-chap', '--init', initiator, '--in-user', "foo", '--in-pass', "bar", '--out-user', "foo", '--out-pass', "bar"]) def capabilities(system_id): """ Return a hash table of key:bool where key is supported operation """ rc = {} out = call([cmd, '-t' + sep, 'capabilities', '--sys', system_id])[1] results = parse(out) for r in results: rc[r[0]] = True if r[1] == 'SUPPORTED' else False return rc def get_existing_fs(system_id): out = call([cmd, '-t' + sep, 'list', '--type', 'FS', ])[1] results = parse(out) if len(results) > 0: return results[0][ID] return None def numbers(): vols = [] test_pool_id = name_to_id(OP_POOL, test_pool_name) for i in range(10): vols.append(create_volume(test_pool_id)) for i in vols: volume_delete(i) def display_check(display_list, system_id): s = [x for x in display_list if x != 'SNAPSHOTS'] for p in s: call([cmd, 'list', '--type', p]) call([cmd, '-H', 'list', '--type', p, ]) call([cmd, '-H', '-t' + sep, 'list', '--type', p]) if 'SNAPSHOTS' in display_list: fs_id = get_existing_fs(system_id) if fs_id: call([cmd, 'list', '--type', 'SNAPSHOTS', '--fs', fs_id]) if 'POOLS' in 
display_list: call([cmd, '-H', '-t' + sep, 'list', '--type', 'POOLS']) def test_display(cap, system_id): """ Crank through supported display operations making sure we get good status for each of them """ to_test = ['SYSTEMS', 'POOLS'] if cap['VOLUMES']: to_test.append('VOLUMES') if cap['FS']: to_test.append("FS") if cap['EXPORTS']: to_test.append("EXPORTS") if cap['ACCESS_GROUPS']: to_test.append("ACCESS_GROUPS") if cap['FS_SNAPSHOTS']: to_test.append('SNAPSHOTS') if cap['EXPORT_AUTH']: to_test.append('NFS_CLIENT_AUTH') if cap['EXPORTS']: to_test.append('EXPORTS') display_check(to_test, system_id) def test_block_creation(cap, system_id): vol_src = None test_pool_id = name_to_id(OP_POOL, test_pool_name) # Fail early if no pool is available if test_pool_id is None: print 'Pool %s is not available!' % test_pool_name exit(10) if cap['VOLUME_CREATE']: vol_src = create_volume(test_pool_id) if cap['VOLUME_RESIZE']: resize_vol(vol_src) if cap['VOLUME_REPLICATE'] and cap['VOLUME_DELETE']: if cap['VOLUME_REPLICATE_CLONE']: clone = volume_replicate(vol_src, 'CLONE', test_pool_id) volume_delete(clone) if cap['VOLUME_REPLICATE_COPY']: copy = volume_replicate(vol_src, 'COPY', test_pool_id) volume_delete(copy) if cap['VOLUME_REPLICATE_MIRROR_ASYNC']: m = volume_replicate(vol_src, 'MIRROR_ASYNC', test_pool_id) volume_delete(m) if cap['VOLUME_REPLICATE_MIRROR_SYNC']: m = volume_replicate(vol_src, 'MIRROR_SYNC', test_pool_id) volume_delete(m) if cap['VOLUME_COPY_RANGE_BLOCK_SIZE']: size = volume_replicate_range_bs(system_id) print 'sub volume replication block size is=', size if cap['VOLUME_COPY_RANGE']: if cap['VOLUME_COPY_RANGE_CLONE']: volume_replicate_range(vol_src, vol_src, "CLONE", 0, 10000, 100) if cap['VOLUME_COPY_RANGE_COPY']: volume_replicate_range(vol_src, vol_src, "COPY", 0, 10000, 100) if cap['VOLUME_CHILD_DEPENDENCY']: volume_child_dependency(vol_src) if cap['VOLUME_CHILD_DEPENDENCY_RM']: volume_child_dependency_rm(vol_src) if cap['VOLUME_DELETE']: 
volume_delete(vol_src) def test_fs_creation(cap, system_id): if test_fs_pool_id: pool_id = test_fs_pool_id else: pool_id = name_to_id(OP_POOL, test_pool_name) if cap['FS_CREATE']: fs_id = fs_create(pool_id) if cap['FS_RESIZE']: resize_fs(fs_id) if cap['FS_DELETE']: delete_fs(fs_id) if cap['FS_CLONE']: fs_id = fs_create(pool_id) clone = clone_fs(fs_id) test_display(cap, system_id) delete_fs(clone) delete_fs(fs_id) if cap['FILE_CLONE']: fs_id = fs_create(pool_id) clone_file(fs_id) test_display(cap, system_id) delete_fs(fs_id) if cap['FS_SNAPSHOT_CREATE'] and cap['FS_CREATE'] and cap['FS_DELETE'] \ and cap['FS_SNAPSHOT_DELETE']: #Snapshot create/delete fs_id = fs_create(pool_id) ss = create_ss(fs_id) test_display(cap, system_id) restore_ss(ss, fs_id) delete_ss(fs_id, ss) delete_fs(fs_id) if cap['FS_CHILD_DEPENDENCY']: fs_id = fs_create(pool_id) fs_child_dependancy(fs_id) delete_fs(fs_id) if cap['FS_CHILD_DEPENDENCY_RM']: fs_id = fs_create(pool_id) clone_fs(fs_id) fs_child_dependancy_rm(fs_id) delete_fs(fs_id) def test_nfs(cap, system_id): if test_fs_pool_id: pool_id = test_fs_pool_id else: pool_id = name_to_id(OP_POOL, test_pool_name) if cap['FS_CREATE'] and cap['EXPORT_FS'] and cap['EXPORT_REMOVE']: fs_id = fs_create(pool_id) export_id = export_fs(fs_id) test_display(cap, system_id) un_export_fs(export_id) delete_fs(fs_id) def test_mapping(cap, system_id): pool_id = name_to_id(OP_POOL, test_pool_name) iqn1 = random_iqn() iqn2 = random_iqn() if cap['ACCESS_GROUP_CREATE_ISCSI_IQN']: ag_id = access_group_create(iqn1, system_id) if cap['VOLUME_ISCSI_CHAP_AUTHENTICATION']: initiator_chap(iqn1) if cap['ACCESS_GROUP_INITIATOR_ADD_ISCSI_IQN']: access_group_initiator_add(ag_id, iqn2) if cap['VOLUME_MASK'] and cap['VOLUME_UNMASK']: vol_id = create_volume(pool_id) volume_mask(ag_id, vol_id) test_display(cap, system_id) if cap['VOLUMES_ACCESSIBLE_BY_ACCESS_GROUP']: volumes_accessible_by_access_group(ag_id) if cap['ACCESS_GROUPS_GRANTED_TO_VOLUME']: 
access_groups_granted_to_volume(vol_id) if cap['VOLUME_UNMASK']: volume_unmask(ag_id, vol_id) if cap['VOLUME_DELETE']: volume_delete(vol_id) if cap['ACCESS_GROUP_INITIATOR_DELETE']: access_group_remove_init(ag_id, iqn1) if cap['ACCESS_GROUP_DELETE']: access_group_delete(ag_id) def test_nfs_operations(cap, system_id): pass def test_plugin_info(cap, system_id): out = call([cmd, 'plugin-info', ])[1] out = call([cmd, '-t' + sep, 'plugin-info', ])[1] def test_plugin_list(cap, system_id): out = call([cmd, 'list', '--type', 'PLUGINS'])[1] out = call([cmd, '-t' + sep, 'list', '--type', 'PLUGINS'])[1] def test_error_paths(cap, system_id): # Generate bad argument exception call([cmd, 'list', '--type', 'SNAPSHOTS'], 2) call([cmd, 'list', '--type', 'SNAPSHOTS', '--fs', 'DOES_NOT_EXIST'], 2) def create_all(cap, system_id): test_plugin_info(cap, system_id) test_block_creation(cap, system_id) test_fs_creation(cap, system_id) test_nfs(cap, system_id) def search_test(cap, system_id): print "\nTesting query with search ID\n" sys_id_filter = "--sys='%s'" % system_id if test_fs_pool_id: pool_id = test_fs_pool_id else: pool_id = name_to_id(OP_POOL, test_pool_name) pool_id_filter = "--pool='%s'" % pool_id vol_id = create_volume(pool_id) vol_id_filter = "--vol='%s'" % vol_id disk_id_filter = "--disk='%s'" % test_disk_id ag_id = access_group_create(random_iqn(), system_id) ag_id_filter = "--ag='%s'" % ag_id fs_id = fs_create(pool_id) fs_id_filter = "--fs='%s'" % fs_id nfs_export_id = export_fs(fs_id) nfs_export_id_filter = "--nfs-export='%s'" % nfs_export_id all_filters = [sys_id_filter, pool_id_filter, vol_id_filter, disk_id_filter, ag_id_filter, fs_id_filter, nfs_export_id_filter] supported = { 'pools': [sys_id_filter, pool_id_filter], 'volumes': [sys_id_filter, pool_id_filter, vol_id_filter, ag_id_filter], 'disks': [sys_id_filter, disk_id_filter], 'access_groups': [sys_id_filter, ag_id_filter, vol_id_filter], 'fs': [sys_id_filter, pool_id_filter, fs_id_filter], 'exports': 
[fs_id_filter, nfs_export_id_filter], } for resouce_type in supported.keys(): for cur_filter in all_filters: if cur_filter in supported[resouce_type]: call([cmd, 'list', '--type', resouce_type, cur_filter]) else: call([cmd, 'list', '--type', resouce_type, cur_filter], 2) un_export_fs(nfs_export_id) delete_fs(fs_id) access_group_delete(ag_id) volume_delete(vol_id) return def volume_raid_info_test(cap, system_id): if cap['VOLUME_RAID_INFO'] and cap['VOLUME_CREATE']: test_pool_id = name_to_id(OP_POOL, test_pool_name) if test_pool_id is None: print 'Pool %s is not available!' % test_pool_name exit(10) vol_id = create_volume(test_pool_id) out = call([cmd, '-t' + sep, 'volume-raid-info', '--vol', vol_id])[1] r = parse(out) if len(r[0]) != 6: print "volume-raid-info got expected output: %s" % out exit(10) if r[0][0] != vol_id: print "volume-raid-info output volume ID is not requested " \ "volume ID %s" % out exit(10) return def pool_member_info_test(cap, system_id): if cap['POOL_MEMBER_INFO']: out = call([cmd, '-t' + sep, 'list', '--type', 'POOLS'])[1] pool_list = parse(out) for pool in pool_list: out = call( [cmd, '-t' + sep, 'pool-member-info', '--pool', pool[0]])[1] r = parse(out) if len(r[0]) != 4: print "pool-member-info got expected output: %s" % out exit(10) if r[0][0] != pool[0]: print "pool-member-info output pool ID is not requested " \ "pool ID %s" % out exit(10) return def volume_raid_create_test(cap, system_id): if cap['VOLUME_RAID_CREATE']: out = call( [cmd, '-t' + sep, 'volume-create-raid-cap', '--sys', system_id])[1] if 'RAID1' not in [r[1] for r in parse(out)]: return out = call([cmd, '-t' + sep, 'list', '--type', 'disks'])[1] free_disk_ids = [] disk_list = parse(out) for disk in disk_list: if 'Free' in disk: if len(free_disk_ids) == 2: break free_disk_ids.append(disk[0]) if len(free_disk_ids) != 2: print "Require two free disks to test volume-create-raid" exit(10) out = call([ cmd, '-t' + sep, 'volume-create-raid', '--disk', free_disk_ids[0], '--disk', 
free_disk_ids[1], '--name', 'test_volume_raid_create', '--raid-type', 'raid1'])[1] volume = parse(out) vol_id = volume[0][0] pool_id = volume[0][-2] if cap['VOLUME_RAID_INFO']: out = call( [cmd, '-t' + sep, 'volume-raid-info', '--vol', vol_id])[1] if parse(out)[0][1] != 'RAID1': print "New volume is not RAID 1" exit(10) if cap['POOL_MEMBER_INFO']: out = call( [cmd, '-t' + sep, 'pool-member-info', '--pool', pool_id])[1] if parse(out)[0][1] != 'RAID1': print "New pool is not RAID 1" exit(10) for disk_id in free_disk_ids: if disk_id not in [p[3] for p in parse(out)]: print "New pool does not contain requested disks" exit(10) if cap['VOLUME_DELETE']: volume_delete(vol_id) return def run_all_tests(cap, system_id): test_display(cap, system_id) test_plugin_list(cap, system_id) test_error_paths(cap, system_id) create_all(cap, system_id) test_mapping(cap, system_id) search_test(cap, system_id) volume_raid_info_test(cap, system_id) pool_member_info_test(cap, system_id) volume_raid_create_test(cap, system_id) if __name__ == "__main__": parser = OptionParser() parser.add_option("-c", "--command", action="store", type="string", dest="cmd", help="specific command line to test") parser.add_option("-p", "--pool", action="store", dest="pool_name", default='lsm_test_aggr', help="pool name to use for testing") parser.add_option("-f", "--fspool", action="store", dest="fs_pool_id", default='', help="fs pool id to use for testing") parser.description = "lsmcli command line test tool" (options, args) = parser.parse_args() if options.cmd is None: print 'Please specify which lsmcli to test using -c or --command' sys.exit(1) else: cmd = options.cmd test_pool_name = options.pool_name if options.fs_pool_id: test_fs_pool_id = options.fs_pool_id #Theory of testing. 
# For each system that is available to us: # Query capabilities # Query all supported query operations (should have more to query) # # Create objects of every supported type # Query all supported query operations # (should have more to query), # run though different options making sure nothing explodes! # # Try calling un-supported operations and expect them to fail systems = get_systems() for system in systems: c = capabilities(system[ID]) run_all_tests(c, system[ID]) libstoragemgmt-1.2.3/tools/0000775000175000017500000000000012542455463012712 500000000000000libstoragemgmt-1.2.3/tools/utility/0000775000175000017500000000000012542455463014415 500000000000000libstoragemgmt-1.2.3/tools/utility/Makefile.am0000664000175000017500000000003412537546123016364 00000000000000EXTRA_DIST = check_const.pl libstoragemgmt-1.2.3/tools/utility/Makefile.in0000664000175000017500000003152112542455445016404 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tools/utility DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ 
$(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = 
@LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = 
@exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ EXTRA_DIST = check_const.pl all: all-am .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu tools/utility/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu tools/utility/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/tools/utility/check_const.pl0000664000175000017500000003254412537737032017164 00000000000000#!/usr/bin/perl # Copyright (C) 2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. 
# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: Gris Ge # # This script compare public constants of lsm Python library files with C # library include files. # Naming scheme: # py_name # Constant name used in Python, example: # # 'lsm.System.STATUS_OK' # # py_value # The value of python constant. # # c_name # Constant name used in C, example: # # 'LSM_SYSTEM_STATUS_OK' # # c_value # The value of C constant. We stored the raw string. use strict; use warnings; use File::Basename; use Cwd 'abs_path'; use Data::Dumper; my $LSM_CODE_BASE_DIR = dirname( dirname( dirname( abs_path($0) ) ) ); my $PYTHON_LIB_DIR = "$LSM_CODE_BASE_DIR/python_binding/lsm"; my $C_LIB_HEADER = "$LSM_CODE_BASE_DIR" . "/c_binding/include/libstoragemgmt/libstoragemgmt.h"; my $REGEX_VALUE_FORMAT = qr/ (?(?&NUM_PAT)) (?(DEFINE) # integer number (? [0-9]+ ) # Bit shift: # 1 << 9 (? 1 [\ \t]+ << [\ \t]+ [0-9]+ ) # Hex number (? 0x[0-9]+ ) (? (?&NUM_BIT_SHIFT) | (?&NUM_HEX) | (?&NUM_INT) ) ) /x; my $REGEX_C_CONST_FORMAT = qr/ ^ (?: (?&HEADER_PAT)) (?(?&CNAME_PAT)) (?: (?&SPLITER_PAT)) (?(?&NUM_PAT)) (?(DEFINE) # integer number (? [0-9]+ ) # Bit shift: # 1 << 9 (? 1 [\ \t]+ << [\ \t]+ [0-9]+ ) # Hex number # 0x0000000000000001 (? 0x[0-9]+ ) (? (?&NUM_BIT_SHIFT) | (?&NUM_HEX) | (?&NUM_INT) ) (? [A-Z][A-Z_0-9]+ ) (? [\ \t]* ) (? \#define[\ \t]+ ) (? (?&HEADER1) | (?&HEADER2) ) (? 
[\ \t]* [=]* [\ \t]* ) ) /x; my %PY_CLASS_NAME_CONV = ( 'Capabilities' => 'CAP', 'ErrorNumber' => 'ERR', 'JobStatus' => 'JOB', 'ErrorLevel' => 'ERR_LEVEL', ); my $REF_RESULT = { 'pass' => {}, 'fail' => {}, 'c_missing' => {}, 'py_missing' => {}, 'c_const_hash' => {}, 'py_const_hash' => {}, 'known_c_to_py_name' => {}, 'known_py_to_c_name' => {}, }; # $REF_RESULT = { # 'pass' => { # $py_name => 1, # Just for deduplication. # }, # 'fail' => { # $py_name => 1, # Just for deduplication. # }, # 'c_missing' => { # $py_name => 1, # }, # 'py_missing' => { # $c_name => 1, # }, # 'py_const_hash' => { # $py_name => $py_value, # }, # 'c_const_hash' => { # $c_name => $c_value, # }, # 'known_c_to_py_name' => { # $c_name => $py_name, # } # } # $|++; sub is_in_array($$) { my $ref_array = shift; my $item = shift; return 1 if grep { $_ eq $item } @{$ref_array}; return undef; } sub py_name_2_c_name($) { # We do these conversion: # 1. Convert CaMel to CA_MEL # 2. Convert System to SYSTEM # 3. Convert Capabilities to CAP and etc using %PY_CLASS_NAME_CONV; my $py_name = shift; if ( $py_name =~ /^lsm\.([a-zA-Z]+)\.([A-Z_][A-Z_0-9]+)$/ ) { my $py_class_name = $1; my $py_var_name = $2; # Convert camel class name if (defined $PY_CLASS_NAME_CONV{$py_class_name}){ return sprintf "LSM_%s_%s", $PY_CLASS_NAME_CONV{$py_class_name}, $py_var_name; } if ( $py_class_name =~ /^[A-Z][a-z]+$/ ) { $py_class_name =~ tr/[a-z]/[A-Z]/; return sprintf "LSM_%s_%s", $py_class_name, $py_var_name; } if ( $py_class_name =~ /^([A-Z][a-z]+)([A-Z][a-z]+)$/ ) { $py_class_name = sprintf "%s_%s", $1, $2; $py_class_name =~ tr/[a-z]/[A-Z]/; return sprintf "LSM_%s_%s", $py_class_name, $py_var_name; } } die "FAIL: Ilegal python constant name '$py_name'.\n"; } sub _parse_c_init_header($){ # Take initial C header file and read its sub header files # Return a reference of array containing file path. 
my $init_header = shift; my $folder_path = dirname($init_header); open my $init_header_fd, "<", $init_header or die "FAIL: Failed to open $init_header $!\n"; my @rc = (); map{ push @rc, "$folder_path/$1" if /#include "([^"]+)"/; }<$init_header_fd>; return \@rc; } sub _get_c_constants($){ my $c_header = shift; open my $c_header_fd, "<", $c_header or die "FAIL: Failed to open $c_header $!\n"; my %rc = (); map{ $rc{$+{'CNAME'}} = $+{'NUM'} if /$REGEX_C_CONST_FORMAT/; }<$c_header_fd>; return \%rc; } sub parse_out_c_const() { # Return a reference like this: # { # $c_name => $value, # } my $ref_sub_c_headers = _parse_c_init_header($C_LIB_HEADER); my $ref_c_name_2_value = {}; foreach my $cur_c_header (@{$ref_sub_c_headers}){ my $ref_tmp = _get_c_constants($cur_c_header); foreach my $key_name (keys %{$ref_tmp}){ $ref_c_name_2_value->{$key_name} = $ref_tmp->{$key_name}; } } return $ref_c_name_2_value; } sub _parse_py_init_file($) { # Return a reference of array containging file path of sub python module. 
my $init_file = shift; open my $init_fd, "<", $init_file or die "FAIL: Failed to open $init_file: $!\n"; my $folder_path = dirname($init_file); my @rc1 = (); my @rc2 = (); my @lines = (); # Merge multiline codes foreach my $line (<$init_fd>) { chomp $line; if ( $line =~ /^[^ ]/ ) { push @lines, $line; } else { $lines[-1] .= $line; } } close $init_fd; foreach my $line (@lines) { if ( $line =~ /from ([^ ]+) import (.+)$/ ) { push @rc1, sprintf "%s/%s.py", $folder_path, $1; my $class_line = $2; while ( $class_line =~ /([A-Z][a-zA-Z]+)[, \\]*/g ) { push @rc2, $1; } } } return \@rc1, \@rc2; } sub _get_py_class_consts($$){ # Take $file_path and $ref_classes # Return reference of hash: # { # $py_name => $value, # } my $py_file = shift; my $ref_classes = shift; open my $py_fd, "<", $py_file or die "FAIL: Failed to open $py_file: $!\n"; my %rc_hash = (); my $cur_class_name = undef; my $current_idention = undef; foreach my $line (<$py_fd>){ chomp $line; if ($line =~ /^([ ]*)class[ ]+([^\(]+)\(/){ $current_idention = $1; $cur_class_name = $2; unless (is_in_array($ref_classes, $cur_class_name)){ $cur_class_name = undef; next; } } unless(defined $cur_class_name){ next; } if ($line =~ /^$current_idention [\ ]+ ([A-Z][A-Z\_0-9]+) [\ ]*=[\ ]* ($REGEX_VALUE_FORMAT)/x){ my $var_name = $1; my $py_value = $2; my $py_name = sprintf "lsm.%s.%s", $cur_class_name, $var_name; $rc_hash{$py_name} = $py_value; } } close $py_fd; return \%rc_hash; } sub parse_out_py_const() { # Return a reference like this: # { # $py_name => $value, # } my ( $ref_sub_files, $ref_classes ) = _parse_py_init_file("$PYTHON_LIB_DIR/__init__.py"); my $ref_py_name_2_value = {}; foreach my $cur_py_file (@{$ref_sub_files}){ my $ref_tmp = _get_py_class_consts($cur_py_file, $ref_classes); foreach my $key_name (keys %{$ref_tmp}){ $ref_py_name_2_value->{$key_name} = $ref_tmp->{$key_name}; } } return $ref_py_name_2_value; } sub value_str_to_int($) { my $raw_value = shift; unless ( defined $raw_value ) { return undef; } if ( 
$raw_value =~ /^[0-9]+$/ ) { return $raw_value; } if ( $raw_value =~ /^0x[0-9]+$/ ) { return hex $raw_value; } if ( $raw_value =~ /^([0-9]+) +<< +([0-9]+)$/ ) { return $1 << $2; } die "FAIL: Failed to convert $raw_value to integer\n"; } sub record_result($$$$) { # Take ($py_name, $py_value, $c_name, $c_value) # Update $REF_RESULT my $py_name = shift; my $py_value = shift; my $c_name = shift; my $c_value = shift; my $real_py_value = undef; my $real_c_value = undef; if ( ( defined $py_name ) && ( defined $py_value ) ) { $real_py_value = value_str_to_int($py_value); $REF_RESULT->{'py_const_hash'}->{$py_name} = sprintf "%s(%s)", $py_value, $real_py_value; } if ( ( defined $c_name ) && ( defined $c_value ) ) { $real_c_value = value_str_to_int($c_value); $REF_RESULT->{'c_const_hash'}->{$c_name} = sprintf "%s(%s)", $c_value, $real_c_value; } unless ($py_name) { my $known_py_name = $REF_RESULT->{'known_c_to_py_name'}->{$c_name}; return 1 if $known_py_name; # Already checked. $REF_RESULT->{'py_missing'}->{$c_name} = 'unknown'; return 1; } unless ($c_name) { # ilegal python variable name, result already updated by # py_name_2_c_name() return 1; } $REF_RESULT->{'known_c_to_py_name'}->{$c_name} = $py_name; $REF_RESULT->{'known_py_to_c_name'}->{$py_name} = $c_name; unless ( defined $py_value ) { # value for py_value will never be undef, just in case. 
$REF_RESULT->{'py_missing'}->{$c_name} = $py_value; return 1; } unless ( defined $c_value ) { $REF_RESULT->{'c_missing'}->{$py_name} = $c_name; return 1; } if ( $real_py_value == $real_c_value ) { $REF_RESULT->{'pass'}->{$py_name} = 1; } else { $REF_RESULT->{'fail'}->{$py_name} = 1; } 1; } sub show_result() { my $format = "%-10s%-60s %s\n"; my @pass_py_names = sort keys %{ $REF_RESULT->{'pass'} }; my @fail_py_names = sort keys %{ $REF_RESULT->{'fail'} }; my @py_missing_c_names = sort keys %{ $REF_RESULT->{'py_missing'} }; my @c_missing_py_names = sort keys %{ $REF_RESULT->{'c_missing'} }; my $ref_py_name_2_c_name = $REF_RESULT->{'known_py_to_c_name'}; my $ref_py_name_2_value = $REF_RESULT->{'py_const_hash'}; my $ref_c_name_2_value = $REF_RESULT->{'c_const_hash'}; # Header printf $format, '#'x8, 'Name', 'Value'; print "\n"; foreach my $py_name (@pass_py_names) { my $py_value = $ref_py_name_2_value->{$py_name}; my $c_name = $ref_py_name_2_c_name->{$py_name}; my $c_value = $ref_c_name_2_value->{$c_name}; printf ($format, "PASS", $py_name, $py_value); printf ($format, " ", $c_name, $c_value); } foreach my $c_name (@py_missing_c_names) { my $py_name = '-' x 8; my $py_value = '-' x 8; my $c_value = $ref_c_name_2_value->{$c_name}; printf ($format, "PY_MISS", $c_name, $c_value); } foreach my $py_name (@c_missing_py_names) { my $c_name = '-' x 8; my $c_value = '-' x 8; my $py_value = $ref_py_name_2_value->{$py_name}; printf ($format, "C_MISS", $py_name, $py_value); } foreach my $py_name (@fail_py_names) { my $py_value = $ref_py_name_2_value->{$py_name}; my $c_name = $ref_py_name_2_c_name->{$py_name}; my $c_value = $ref_c_name_2_value->{$c_name}; printf ($format, "FAIL", $py_name, $py_value); printf ($format, " ", $c_name, $c_value); } 1; } sub main() { my $ref_py_const_hash = parse_out_py_const(); my $ref_c_const_hash = parse_out_c_const(); map { my $py_name = $_; my $c_name = py_name_2_c_name($py_name); record_result( $py_name, $ref_py_const_hash->{$py_name}, $c_name, 
$ref_c_const_hash->{$c_name} ) } keys %{$ref_py_const_hash}; map { my $c_name = $_; # We don't have a way to convert C constant name to python one. # We just treat all C constant as missing if not marked by previous # check. record_result( undef, undef, $c_name, $ref_c_const_hash->{$c_name} ) } keys %{$ref_c_const_hash}; show_result(); exit 1 if ( %{ $REF_RESULT->{'fail'} } || %{ $REF_RESULT->{'c_missing'} } || %{ $REF_RESULT->{'py_missing'} } ); exit 0; } main(); libstoragemgmt-1.2.3/tools/udev/0000775000175000017500000000000012542455463013655 500000000000000libstoragemgmt-1.2.3/tools/udev/scan-scsi-target.c0000664000175000017500000001277112537546123017116 00000000000000/* * Scan a SCSI target given a uevent path to one of its devices * Author: Ewan D. Milne * * Copyright (C) 2013, Red Hat Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ #include #include #include #include #include #include #include #include #include /* * Example SCSI uevent device path: * * /devices/pseudo_0/adapter0/host3/target3:0:0/3:0:0:0 * * Desired sysfs action: * * write " -" to "/sys/devices/pseudo_0/adapter0/host3/scsi_host/host3/scan" * * Note: Per kernel Documentation/sysfs-rules.txt, sysfs is always mounted at /sys */ static void __attribute__ ((__noreturn__)) usage(char **argv, int err) { fprintf(stderr, "\nUsage:\n"); fprintf(stderr, "%s \n", argv[0]); fprintf(stderr, "\nOptions:\n"); fprintf(stderr, " -h, --help display this help and exit\n"); exit(err); } static void __attribute__ ((__noreturn__)) invalid(char **argv, char *devpath) { fprintf(stderr, "Invalid DEVPATH '%s'.\n", devpath); usage(argv, 1); } int main(int argc, char **argv) { char c; char *devpath; char *sysfs_path; char *sysfs_data; struct stat sysfs_stat; int fd; char *host_str; int host_pos; int host_len; char *host_next_str; int host_next_pos; int host_next_len; char *target_str; int target_pos; int target_len; char *channel_str; int channel_pos; int channel_len; char *id_str; int id_pos; int id_len; char *dir_str; static const struct option longopts[] = { {"help", no_argument, 0, 'h'}, {NULL, no_argument, 0, '0'}, }; while ((c = getopt_long(argc, argv, "rh", longopts, NULL)) != -1) { switch (c) { case 'h': usage(argv, 0); default: usage(argv, 1); } } if (optind >= argc) { usage(argv, 1); } devpath = argv[optind++]; /* * Make sure SCSI device uevent DEVPATH was supplied, and that it exists. * Also verify that it is a directory, to provide some argument validation. * Note: the devpath does not include the "/sys" prefix, so we must add it. 
*/ if (devpath == NULL) { usage(argv, 1); } sysfs_path = malloc(strlen("/sys") + strlen(devpath) + 1); strcpy(sysfs_path, "/sys"); strcat(sysfs_path, devpath); if (stat(sysfs_path, &sysfs_stat) < 0) { fprintf(stderr, "Cannot stat '%s': %s\n", sysfs_path, strerror(errno)); usage(argv, 1); } if (!S_ISDIR(sysfs_stat.st_mode)) invalid(argv, devpath); free(sysfs_path); /* * Construct the path to the "scan" entry in the Scsi_Host sysfs object. */ if ((host_str = strstr(devpath, "/host")) == NULL) invalid(argv, devpath); host_pos = strlen(devpath) - strlen(host_str); if ((host_next_str = strstr(&devpath[host_pos + 1], "/")) == NULL) invalid(argv, devpath); host_next_pos = strlen(devpath) - strlen(host_next_str); if ((target_str = strstr(devpath, "/target")) == NULL) invalid(argv, devpath); target_pos = strlen(devpath) - strlen(target_str); host_len = host_next_pos - host_pos; if (host_len <= strlen("/host")) invalid(argv, devpath); host_next_len = strlen(&devpath[host_next_pos]); if (host_next_len <= strlen("/")) invalid(argv, devpath); target_len = strlen(&devpath[target_pos]); if (target_len <= strlen("/target")) invalid(argv, devpath); sysfs_path = malloc(strlen("/sys") + strlen(devpath) - host_next_len + strlen("/scsi_host") + host_len + strlen("/scan") + 1); strcpy(sysfs_path, "/sys"); strncat(sysfs_path, devpath, host_next_pos); strcat(sysfs_path, "/scsi_host"); strncat(sysfs_path, host_str, host_len); strcat(sysfs_path, "/scan"); /* * Obtain the SCSI channel and ID, and construct the string to write to the "scan" entry. 
*/ if ((channel_str = strstr(&devpath[target_pos], ":")) == NULL) invalid(argv, devpath); channel_pos = strlen(&devpath[target_pos]) - strlen(channel_str) + 1; if ((id_str = strstr(&devpath[target_pos + channel_pos], ":")) == NULL) invalid(argv, devpath); id_pos = strlen(&devpath[target_pos + channel_pos]) - strlen(id_str) + 1; if ((dir_str = strstr(&devpath[target_pos + channel_pos + id_pos], "/")) == NULL) invalid(argv, devpath); channel_len = strlen(&devpath[target_pos + channel_pos]) - strlen(id_str); if (channel_len < 1) invalid(argv, devpath); id_len = strlen(&devpath[target_pos + channel_pos + id_pos]) - strlen(dir_str); if (id_len < 1) invalid(argv, devpath); sysfs_data = malloc(channel_len + strlen(" ") + id_len + strlen(" -") + 1); sysfs_data[0] = '\0'; strncat(sysfs_data, &devpath[target_pos + channel_pos], channel_len); strcat(sysfs_data, " "); strncat(sysfs_data, &devpath[target_pos + channel_pos + id_pos], id_len); strcat(sysfs_data, " -"); /* * Tell the kernel to rescan the SCSI target for new LUNs. */ if ((fd = open(sysfs_path, O_WRONLY)) < 0) { fprintf(stderr, "Cannot open '%s': %s\n", sysfs_path, strerror(errno)); usage(argv, 1); } if (write(fd, sysfs_data, strlen(sysfs_data)) < 0) { fprintf(stderr, "Cannot write '%s': %s\n", sysfs_path, strerror(errno)); usage(argv, 1); } close(fd); free(sysfs_path); free(sysfs_data); return 0; } libstoragemgmt-1.2.3/tools/udev/Makefile.am0000664000175000017500000000016112537546123015625 00000000000000EXTRA_DIST = 90-scsi-ua.rules noinst_PROGRAMS = scan-scsi-target scan_scsi_target_SOURCES = scan-scsi-target.c libstoragemgmt-1.2.3/tools/udev/Makefile.in0000664000175000017500000004476612542455445015663 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) 
-c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ noinst_PROGRAMS = scan-scsi-target$(EXEEXT) subdir = tools/udev DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(top_srcdir)/build-aux/depcomp ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = PROGRAMS = $(noinst_PROGRAMS) am_scan_scsi_target_OBJECTS = scan-scsi-target.$(OBJEXT) scan_scsi_target_OBJECTS = $(am_scan_scsi_target_OBJECTS) scan_scsi_target_LDADD = $(LDADD) AM_V_lt = $(am__v_lt_@AM_V@) am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) am__v_lt_0 = --silent am__v_lt_1 = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) depcomp = $(SHELL) $(top_srcdir)/build-aux/depcomp am__depfiles_maybe = depfiles am__mv = mv -f COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \ $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ $(AM_CFLAGS) $(CFLAGS) AM_V_CC = $(am__v_CC_@AM_V@) am__v_CC_ = 
$(am__v_CC_@AM_DEFAULT_V@) am__v_CC_0 = @echo " CC " $@; am__v_CC_1 = CCLD = $(CC) LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(AM_LDFLAGS) $(LDFLAGS) -o $@ AM_V_CCLD = $(am__v_CCLD_@AM_V@) am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@) am__v_CCLD_0 = @echo " CCLD " $@; am__v_CCLD_1 = SOURCES = $(scan_scsi_target_SOURCES) DIST_SOURCES = $(scan_scsi_target_SOURCES) am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. 
am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = 
@OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = 
@pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ EXTRA_DIST = 90-scsi-ua.rules scan_scsi_target_SOURCES = scan-scsi-target.c all: all-am .SUFFIXES: .SUFFIXES: .c .lo .o .obj $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu tools/udev/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu tools/udev/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): clean-noinstPROGRAMS: @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ echo " rm -f" $$list; \ rm -f $$list || exit $$?; \ test -n "$(EXEEXT)" || exit 0; \ list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ echo " rm -f" $$list; \ rm -f $$list scan-scsi-target$(EXEEXT): $(scan_scsi_target_OBJECTS) $(scan_scsi_target_DEPENDENCIES) 
$(EXTRA_scan_scsi_target_DEPENDENCIES) @rm -f scan-scsi-target$(EXEEXT) $(AM_V_CCLD)$(LINK) $(scan_scsi_target_OBJECTS) $(scan_scsi_target_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scan-scsi-target.Po@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $< .c.obj: @am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'` .c.lo: @am__fastdepCC_TRUE@ $(AM_V_CC)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ @am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ @am__fastdepCC_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo @AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $< mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-am TAGS: tags 
tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-am CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-am cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(PROGRAMS) installdirs: install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \ mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-tags dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -rf ./$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-compile mostlyclean-generic \ mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ clean-libtool clean-noinstPROGRAMS cscopelist-am ctags \ ctags-am distclean distclean-compile distclean-generic \ distclean-libtool distclean-tags distdir dvi dvi-am html \ html-am info info-am install install-am install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-compile \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags tags-am uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: libstoragemgmt-1.2.3/tools/udev/90-scsi-ua.rules0000664000175000017500000000111712537546123016441 00000000000000#ACTION=="change", SUBSYSTEM=="scsi", ENV{SDEV_UA}=="INQUIRY_DATA_HAS_CHANGED", TEST=="rescan", ATTR{rescan}="x" #ACTION=="change", SUBSYSTEM=="scsi", ENV{SDEV_UA}=="CAPACITY_DATA_HAS_CHANGED", TEST=="rescan", ATTR{rescan}="x" #ACTION=="change", SUBSYSTEM=="scsi", ENV{SDEV_UA}=="THIN_PROVISIONING_SOFT_THRESHOLD_REACHED", TEST=="rescan", ATTR{rescan}="x" #ACTION=="change", SUBSYSTEM=="scsi", ENV{SDEV_UA}=="MODE_PARAMETERS_CHANGED", TEST=="rescan", ATTR{rescan}="x" ACTION=="change", SUBSYSTEM=="scsi", ENV{SDEV_UA}=="REPORTED_LUNS_DATA_HAS_CHANGED", RUN+="scan-scsi-target $env{DEVPATH}" libstoragemgmt-1.2.3/tools/bash_completion/0000775000175000017500000000000012542455463016060 500000000000000libstoragemgmt-1.2.3/tools/bash_completion/Makefile.am0000664000175000017500000000011512537546123020027 00000000000000if WITH_BASH_COMPLETION EXTRA_DIST=lsmcli bashcompletion_DATA = lsmcli endif libstoragemgmt-1.2.3/tools/bash_completion/Makefile.in0000664000175000017500000003637312542455445020061 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tools/bash_completion DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ 
$(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(bashcompletiondir)" DATA = $(bashcompletion_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = 
@NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir 
= @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ @WITH_BASH_COMPLETION_TRUE@EXTRA_DIST = lsmcli @WITH_BASH_COMPLETION_TRUE@bashcompletion_DATA = lsmcli all: all-am .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu tools/bash_completion/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu tools/bash_completion/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-bashcompletionDATA: $(bashcompletion_DATA) @$(NORMAL_INSTALL) @list='$(bashcompletion_DATA)'; test -n "$(bashcompletiondir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bashcompletiondir)'"; \ $(MKDIR_P) 
"$(DESTDIR)$(bashcompletiondir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(bashcompletiondir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(bashcompletiondir)" || exit $$?; \ done uninstall-bashcompletionDATA: @$(NORMAL_UNINSTALL) @list='$(bashcompletion_DATA)'; test -n "$(bashcompletiondir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(bashcompletiondir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(bashcompletiondir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-bashcompletionDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-bashcompletionDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-bashcompletionDATA install-data \ install-data-am install-dvi install-dvi-am install-exec \ install-exec-am install-html install-html-am install-info \ install-info-am install-man install-pdf install-pdf-am \ install-ps install-ps-am install-strip installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ uninstall-am uninstall-bashcompletionDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/tools/bash_completion/lsmcli0000664000175000017500000004007212542455432017205 00000000000000# Copyright (C) 2015 Red Hat, Inc., Tony Asleson # Distributed under the GNU General Public License, version 2.0. # See: https://www.gnu.org/licenses/gpl-2.0.html # # Bash completion for lsmcli. 
This may be far from ideal, # suggestions & improvements appreciated! potential_args='' # Skip value lookups by default NO_VALUE_LOOKUP=${LSMCLI_AUTO_COMPLETE_VALUE:=0} function join { local IFS="$1"; shift; echo "$*"; } # Linear search of an array of strings for the specified string function listcontains() { declare -a the_list=("${!1}") for word in "${the_list[@]}" ; do [[ ${word} == $2 ]] && return 0 done return 1 } # Given a list of what is possible and what is on the command line return # what is left. # $1 What is possible # Retults are returned in global string $potential_args function possible_args() { local l=() for i in $1 do listcontains COMP_WORDS[@] "$i" if [[ $? -eq 1 ]] ; then l+=("$i") fi done potential_args=$( join ' ', "${l[@]}" ) } # Returns the position of the value in the COMP_WORDS that contains $1, or # 255 if it doesn't exist function arg_index() { count=0 for i in "${COMP_WORDS[@]}" do if [[ "$i" == "$1" ]] ; then return ${count} fi let count+=1 done return 255 } function _lsm() { local cur prev opts sep='#' COMPREPLY=() cur="${COMP_WORDS[COMP_CWORD]}" prev="${COMP_WORDS[COMP_CWORD-1]}" opts_short="-b -v -u -P -H -t -e -f -w -b" opts_long=" --help --version --uri --prompt --human --terse --enum \ --force --wait --header --script " opts_cmds="list job-status capabilities plugin-info volume-create \ volume-delete volume-resize volume-replicate \ volume-replicate-range volume-replicate-range-block-size \ volume-dependants volume-dependants-rm volume-access-group \ volume-mask volume-unmask access-group-create \ access-group-delete access-group-add access-group-remove \ volume-enable volume-disable iscsi-chap fs-create fs-delete \ fs-resize fs-export fs-unexport fs-clone fs-snap-create \ fs-snap-delete fs-snap-restore fs-dependants fs-dependants-rm \ file-clone ls lp lv ld la lf lt c p vc vd vr vm vi ve vi ac \ aa ar ad vri volume-raid-info pool-member-info pmi \ vrc volume-raid-create vrcc volume-raid-create-cap" list_args="--type" 
list_type_args="volumes pools fs snapshots exports nfs_client_auth \ access_groups systems disks plugins target_ports" opts_filter="--sys --pool --vol --disk --ag --fs --nfs" cap_args="--sys" volume_create_args="--name --size --pool" volume_delete_args="--vol --force" # Should force be here, to easy to tab through?" volume_resize_args="--vol --size --force" # Should force be here, to easy to tab through?" volume_replicate_args="--vol --name --rep-type" # Hmmm, this looks like a bug with CLI, should support lower and upper case? volume_rep_types="CLONE COPY MIRROR_ASYNC MIRROR_SYNC" volume_replicate_range_args="--src-vol --dst-vol --rep-type --src-start \ --dst-start --count --force" # Force ? volume_replication_range_bs="--sys" volume_dependants="--vol" volume_access_group_args="--vol" volume_masking_args="--vol --ag" access_group_create_args="--name --init --sys" access_group_delete_args="--ag" access_group_add_remove_args="--ag --init" volume_enable_disable_args="--vol" volume_raidinfo_args="--vol" iscsi_chap_args="--in-user --in-pass --out-user --out-pass" fs_create_args="--name --size --pool" fs_delete_args="--fs --force" # Force ? fs_resize_args="--fs --size --force" # Force ? fs_export_args="--fs --exportpath --anonuid --auth-type --root-host --ro-host --rw-host" fs_unexport_args="--export" fs_clone_args="--src-fs --dst-name" fs_snap_create_args="--name --fs" fs_snap_delete_args="--snap --fs" fs_snap_restore_args="--snap --fs --file --fileas --force" fs_dependants_args="--fs" file_clone_args="--fs --src --dst --backing-snapshot" pool_member_info_args="--pool" volume_raid_create_args="--name --disk --raid-type --strip-size" volume_raid_create_cap_args="--sys" # These operations can potentially be slow and cause hangs depending on plugin and configuration if [[ ${NO_VALUE_LOOKUP} -ne 0 ]] ; then # Check if we have somthing present that we can help the user with case "${prev}" in --sys) # Is there a better way todo this? 
local items=`lsmcli list --type systems -t${sep} | awk -F ${sep} '{print $1}'` COMPREPLY=( $(compgen -W "${items}" -- ${cur}) ) return 0 ;; --pool) # Is there a better way todo this? local items=`lsmcli list --type pools -t${sep} | awk -F ${sep} '{print $1}'` COMPREPLY=( $(compgen -W "${items}" -- ${cur}) ) return 0 ;; --vol|--src-vol|--dst-vol) # Is there a better way todo this? local items=`lsmcli list --type volumes -t${sep} | awk -F ${sep} '{print $1}'` COMPREPLY=( $(compgen -W "${items}" -- ${cur}) ) return 0 ;; --disk) # Is there a better way todo this? local items=`lsmcli list --type disks -t${sep} | awk -F ${sep} '{print $1}'` COMPREPLY=( $(compgen -W "${items}" -- ${cur}) ) return 0 ;; --ag) # Is there a better way todo this? local items=`lsmcli list --type access_groups -t${sep} | awk -F ${sep} '{print $1}'` COMPREPLY=( $(compgen -W "${items}" -- ${cur}) ) return 0 ;; --init) arg_index "--ag" i=$? # We have an access group present on the command line so filter the intiators to it if [[ ${i} -ne 255 ]]; then # It would be better if we filtered the result with the access group # if it's present on the command line already. local items=`lsmcli list --type access_groups -t${sep} --ag ${COMP_WORDS[${i}+1]} | awk -F ${sep} '{print $3}'` COMPREPLY=( $(compgen -W "${items}" -- ${cur}) ) return 0 else local items=`lsmcli list --type access_groups -t${sep} | awk -F ${sep} '{print $3}'` COMPREPLY=( $(compgen -W "${items}" -- ${cur}) ) return 0 fi ;; --nfs-export) # Is there a better way todo this? local items=`lsmcli list --type exports -t${sep} | awk -F ${sep} '{print $1}'` COMPREPLY=( $(compgen -W "${items}" -- ${cur}) ) return 0 ;; --tgt) # Is there a better way todo this? 
local items=`lsmcli list --type target_ports -t${sep} | awk -F ${sep} '{print $1}'` COMPREPLY=( $(compgen -W "${items}" -- ${cur}) ) return 0 ;; --fs|--src-fs) local items=`lsmcli list --type fs -t${sep} | awk -F ${sep} '{print $1}'` COMPREPLY=( $(compgen -W "${items}" -- ${cur}) ) return 0 ;; --export) local items=`lsmcli list --type exports -t${sep} | awk -F ${sep} '{print $1}'` COMPREPLY=( $(compgen -W "${items}" -- ${cur}) ) return 0 ;; --snap) arg_index "--fs" i=$? # We have an access group present on the command line so filter the snapshots to it if [[ ${i} -ne 255 ]]; then local items=`lsmcli list --type snapshots \ --fs ${COMP_WORDS[${i}+1]} -t${sep} | awk -F ${sep} '{print $1}'` COMPREPLY=( $(compgen -W "${items}" -- ${cur}) ) return 0 else COMPREPLY=( $(compgen -W "" -- ${cur}) ) return 0 fi ;; --auth-type) local items=`lsmcli list --type nfs_client_auth -t ' '` COMPREPLY=( $(compgen -W "${items}" -- ${cur}) ) return 0 ;; *) ;; esac fi # Cases where we don't have to worry about look-up time case "${prev}" in --type) COMPREPLY=( $(compgen -W "${list_type_args}" -- ${cur}) ) return 0 ;; --size|--count|--src-start|--dst-start|--name|--in-user|--in-pass|\ --out-user|--out-pass|--exportpath|--anonuid|--root-host|--ro-host|\ --rw-host|--dest-name|--file|--fileas|--src|--dst) # These we cannot lookup, so don't offer any values COMPREPLY=( $(compgen -W "" -- ${cur}) ) return 0 ;; --rep-type) COMPREPLY=( $(compgen -W "${volume_rep_types}" -- ${cur}) ) return 0 ;; snapshots) # Specific listing case where you need a fs too if [[ ${COMP_WORDS[COMP_CWORD-2]} == '--type' && \ ${COMP_WORDS[COMP_CWORD-3]} == 'list' ]] ; then COMPREPLY=( $(compgen -W "--fs" -- ${cur}) ) return 0 fi ;; *) esac case "${COMP_WORDS[1]}" in job-status) possible_args "--job" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; list) possible_args ${list_args} COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; volume-create|vc) possible_args 
"${volume_create_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; volume-delete|vd) possible_args "${volume_delete_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; volume-raid-info|vri) possible_args "${volume_raidinfo_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; volume-resize|vr) possible_args "${volume_resize_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; volume-replicate) possible_args "${volume_replicate_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; volume-replicate-range) possible_args "${volume_replicate_range_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; volume-replicate-range-block-size) possible_args "${volume_replication_range_bs}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; volume-dependants|volume-dependants-rm) possible_args "${volume_dependants}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; volume-access-group) possible_args "${volume_access_group_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; volume-mask|volume-unmask|vm|vu) possible_args "${volume_masking_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; access-group-create|ac) possible_args "${access_group_create_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; access-group-delete|ad) possible_args "${access_group_delete_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; access-group-add|access-group-remove|aa|ar) possible_args "${access_group_add_remove_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; volume-enable|volume-disable|ve|vi) possible_args "${volume_enable_disable_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; iscsi-chap) possible_args "${iscsi_chap_args}" COMPREPLY=( $(compgen -W 
"${potential_args}" -- ${cur}) ) return 0 ;; fs-create) possible_args "${fs_create_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; fs-delete) possible_args "${fs_delete_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; fs-resize) possible_args "${fs_resize_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; fs-export) possible_args "${fs_export_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; fs-unexport) possible_args "${fs_unexport_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; fs-clone) possible_args "${fs_clone_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; fs-snap-create) possible_args "${fs_snap_create_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; fs-snap-delete) possible_args "${fs_snap_delete_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; fs-snap-restore) possible_args "${fs_snap_restore_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; fs-dependants|fs-dependants-rm) possible_args "${fs_dependants_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; file-clone) possible_args "${file_clone_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; capabilities|c) possible_args "${cap_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; pool-member-info|pmi) possible_args "${pool_member_info_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; volume-raid-create|vrc) possible_args "${volume_raid_create_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; volume-raid-create-cap|vrcc) possible_args "${volume_raid_create_cap_args}" COMPREPLY=( $(compgen -W "${potential_args}" -- ${cur}) ) return 0 ;; *) ;; esac # Handle the case where we are starting out with nothing if [[ ${prev} == 'lsmcli' ]] ; 
then if [[ ${cur} == --* ]] ; then COMPREPLY=( $(compgen -W "${opts_long}" -- ${cur}) ) return 0 fi if [[ ${cur} == -* ]] ; then COMPREPLY=( $(compgen -W "${opts_short}${opts_long}" -- ${cur}) ) return 0 fi if [[ ${cur} == * ]] ; then COMPREPLY=( $(compgen -W "${opts_short}${opts_long}${opts_cmds}" -- ${cur}) ) return 0 fi fi } complete -F _lsm lsmcli libstoragemgmt-1.2.3/tools/Makefile.am0000664000175000017500000000015112537546123014661 00000000000000## Process this file with automake to produce Makefile.in SUBDIRS = lsmcli udev utility bash_completion libstoragemgmt-1.2.3/tools/Makefile.in0000664000175000017500000004607012542455445014706 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) 
;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tools DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ 
$(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ $(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. 
This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ 
LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ 
bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ SUBDIRS = lsmcli udev utility bash_completion all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu tools/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu tools/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. 
$(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z 
"$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all 
variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/tools/lsmcli/0000775000175000017500000000000012542455463014175 500000000000000libstoragemgmt-1.2.3/tools/lsmcli/cmdline.py0000664000175000017500000014562312537737032016114 00000000000000# Copyright (C) 2012-2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: tasleson # Gris Ge import os import sys import getpass import time import tty import termios try: from collections import OrderedDict except ImportError: # python 2.6 or earlier, use backport from ordereddict import OrderedDict from argparse import ArgumentParser from argparse import RawTextHelpFormatter from lsm import (Client, Pool, VERSION, LsmError, Disk, Volume, JobStatus, ErrorNumber, BlockRange, uri_parse, Proxy, size_human_2_size_bytes, AccessGroup, FileSystem, NfsExport, TargetPort) from lsm.lsmcli.data_display import ( DisplayData, PlugData, out, vol_provision_str_to_type, vol_rep_type_str_to_type, VolumeRAIDInfo, PoolRAIDInfo, VcrCap) ## Wraps the invocation to the command line # @param c Object to invoke calls on (optional) def cmd_line_wrapper(c=None): """ Common command line code, called. 
""" err_exit = 0 cli = None try: cli = CmdLine() cli.process(c) except ArgError as ae: sys.stderr.write(str(ae)) sys.stderr.flush() err_exit = 2 except LsmError as le: sys.stderr.write(str(le) + "\n") sys.stderr.flush() err_exit = 4 except KeyboardInterrupt: err_exit = 1 finally: # We got here because of an exception, but we still may have a valid # connection to do an orderly shutdown with, lets try it before we # just exit closing the connection. if cli: try: # This will exit if are successful cli.shutdown(err_exit) except Exception: pass sys.exit(err_exit) ## Get a character from stdin without needing a return key pressed. # Returns the character pressed def getch(): fd = sys.stdin.fileno() prev = termios.tcgetattr(fd) try: tty.setraw(sys.stdin.fileno()) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, prev) return ch def parse_convert_init(init_id): """ If init_id is a WWPN, convert it into LSM standard version: (?:[0-9a-f]{2}:){7}[0-9a-f]{2} Return (converted_init_id, lsm_init_type) """ valid, init_type, init_id = AccessGroup.initiator_id_verify(init_id) if valid: return (init_id, init_type) raise ArgError("--init-id %s is not a valid WWPN or iSCSI IQN" % init_id) ## This class represents a command line argument error class ArgError(Exception): def __init__(self, message, *args, **kwargs): """ Class represents an error. """ Exception.__init__(self, *args, **kwargs) self.msg = message def __str__(self): return "%s: error: %s\n" % (os.path.basename(sys.argv[0]), self.msg) ## Finds an item based on the id. Each list item requires a member "id" # @param l list to search # @param the_id the id to match # @param friendly_name - name to put in the exception saying what we # couldn't find def _get_item(l, the_id, friendly_name='item', raise_error=True): for item in l: if item.id == the_id: return item if raise_error: raise ArgError('%s with ID %s not found!' 
% (friendly_name, the_id)) else: return None list_choices = ['VOLUMES', 'POOLS', 'FS', 'SNAPSHOTS', 'EXPORTS', "NFS_CLIENT_AUTH", 'ACCESS_GROUPS', 'SYSTEMS', 'DISKS', 'PLUGINS', 'TARGET_PORTS'] provision_types = ('DEFAULT', 'THIN', 'FULL') provision_help = "provisioning type: " + ", ".join(provision_types) replicate_types = ('CLONE', 'COPY', 'MIRROR_ASYNC', 'MIRROR_SYNC') replicate_help = "replication type: " + ", ".join(replicate_types) size_help = 'Can use B, KiB, MiB, GiB, TiB, PiB postfix (IEC sizing)' sys_id_opt = dict(name='--sys', metavar='', help='System ID') sys_id_filter_opt = sys_id_opt.copy() sys_id_filter_opt['help'] = 'Search by System ID' pool_id_opt = dict(name='--pool', metavar='', help='Pool ID') pool_id_filter_opt = pool_id_opt.copy() pool_id_filter_opt['help'] = 'Search by Pool ID' vol_id_opt = dict(name='--vol', metavar='', help='Volume ID') vol_id_filter_opt = vol_id_opt.copy() vol_id_filter_opt['help'] = 'Search by Volume ID' fs_id_opt = dict(name='--fs', metavar='', help='File System ID') ag_id_opt = dict(name='--ag', metavar='', help='Access Group ID') ag_id_filter_opt = ag_id_opt.copy() ag_id_filter_opt['help'] = 'Search by Access Group ID' init_id_opt = dict(name='--init', metavar='', help='Initiator ID') snap_id_opt = dict(name='--snap', metavar='', help='Snapshot ID') export_id_opt = dict(name='--export', metavar='', help='Export ID') nfs_export_id_filter_opt = dict( name='--nfs-export', metavar='', help='Search by NFS Export ID') disk_id_filter_opt = dict(name='--disk', metavar='', help='Search by Disk ID') size_opt = dict(name='--size', metavar='', help=size_help) tgt_id_opt = dict(name="--tgt", help="Search by target port ID", metavar='') cmds = ( dict( name='list', help="List records of different types", args=[ dict(name='--type', help="List records of type:\n " + "\n ".join(list_choices) + "\n\nWhen listing SNAPSHOTS, it requires --fs .", metavar='', choices=list_choices, type=str.upper), ], optional=[ dict(sys_id_filter_opt), 
dict(pool_id_filter_opt), dict(vol_id_filter_opt), dict(disk_id_filter_opt), dict(ag_id_filter_opt), dict(fs_id_opt), dict(nfs_export_id_filter_opt), dict(tgt_id_opt), ], ), dict( name='job-status', help='Retrieve information about a job', args=[ dict(name="--job", metavar="", help='job status id'), ], ), dict( name='capabilities', help='Retrieves array capabilities', args=[ dict(sys_id_opt), ], ), dict( name='plugin-info', help='Retrieves plugin description and version', ), dict( name='volume-create', help='Creates a volume (logical unit)', args=[ dict(name="--name", help='volume name', metavar=''), dict(size_opt), dict(pool_id_opt), ], optional=[ dict(name="--provisioning", help=provision_help, default='DEFAULT', choices=provision_types, type=str.upper), ], ), dict( name='volume-raid-create', help='Creates a RAIDed volume on hardware RAID', args=[ dict(name="--name", help='volume name', metavar=''), dict(name="--disk", metavar='', help='Free disks for new RAIDed volume.\n' 'This is repeatable argument.', action='append'), dict(name="--raid-type", help="RAID type for the new RAID group. " "Should be one of these:\n %s" % "\n ". join(VolumeRAIDInfo.VOL_CREATE_RAID_TYPES_STR), choices=VolumeRAIDInfo.VOL_CREATE_RAID_TYPES_STR, type=str.upper), ], optional=[ dict(name="--strip-size", help="Strip size. " + size_help), ], ), dict( name='volume-raid-create-cap', help='Query capablity of creating a RAIDed volume on hardware RAID', args=[ dict(sys_id_opt), ], ), dict( name='volume-delete', help='Deletes a volume given its id', args=[ dict(vol_id_opt), ], ), dict( name='volume-resize', help='Re-sizes a volume', args=[ dict(vol_id_opt), dict(name='--size', metavar='', help="New size. 
%s" % size_help), ], ), dict( name='volume-replicate', help='Creates a new volume and replicates provided volume to it.', args=[ dict(vol_id_opt), dict(name="--name", metavar='', help='The name for New replicated volume'), dict(name="--rep-type", metavar='', help=replicate_help, choices=replicate_types), ], optional=[ dict(name="--pool", help='Pool ID to contain the new volume.\nBy default, ' 'new volume will be created in the same pool.'), ], ), dict( name='volume-replicate-range', help='Replicates a portion of a volume', args=[ dict(name="--src-vol", metavar='', help='Source volume id'), dict(name="--dst-vol", metavar='', help='Destination volume id'), dict(name="--rep-type", metavar='', help=replicate_help, choices=replicate_types), dict(name="--src-start", metavar='', help='Source volume start block number.\n' 'This is repeatable argument.', action='append'), dict(name="--dst-start", metavar='', help='Destination volume start block number.\n' 'This is repeatable argument.', action='append'), dict(name="--count", metavar='', help='Number of blocks to replicate.\n' 'This is repeatable argument.', action='append'), ], ), dict( name='volume-replicate-range-block-size', help='Size of each replicated block on a system in bytes', args=[ dict(sys_id_opt), ], ), dict( name='volume-dependants', help='Returns True if volume has a dependant child, like replication', args=[ dict(vol_id_opt), ], ), dict( name='volume-dependants-rm', help='Removes dependencies', args=[ dict(vol_id_opt), ], ), dict( name='volume-access-group', help='Lists the access group(s) that have access to volume', args=[ dict(vol_id_opt), ], ), dict( name='volume-mask', help='Grants access to an access group to a volume, ' 'like LUN Masking', args=[ dict(vol_id_opt), dict(ag_id_opt), ], ), dict( name='volume-unmask', help='Revoke the access of specified access group to a volume', args=[ dict(ag_id_opt), dict(vol_id_opt), ], ), dict( name='volume-enable', help='Enable block access of a volume', args=[ 
dict(vol_id_opt), ], ), dict( name='volume-disable', help='Disable block access of a volume', args=[ dict(vol_id_opt), ], ), dict( name='volume-raid-info', help='Query volume RAID infomation', args=[ dict(vol_id_opt), ], ), dict( name='pool-member-info', help='Query Pool membership infomation', args=[ dict(pool_id_opt), ], ), dict( name='access-group-create', help='Create an access group', args=[ dict(name='--name', metavar='', help="Human readable name for access group"), # TODO: _client.py access_group_create should support multiple # initiators when creating. dict(init_id_opt), dict(sys_id_opt), ], ), dict( name='access-group-add', help='Add an initiator into existing access group', args=[ dict(ag_id_opt), dict(init_id_opt), ], ), dict( name='access-group-remove', help='Remove an initiator from existing access group', args=[ dict(ag_id_opt), dict(init_id_opt), ], ), dict( name='access-group-delete', help='Deletes an access group', args=[ dict(ag_id_opt), ], ), dict( name='access-group-volumes', help='Lists the volumes that the access group has' ' been granted access to', args=[ dict(ag_id_opt), ], ), dict( name='iscsi-chap', help='Configures iSCSI inbound/outbound CHAP authentication', args=[ dict(init_id_opt), ], optional=[ dict(name="--in-user", metavar='', help='Inbound chap user name'), dict(name="--in-pass", metavar='', help='Inbound chap password'), dict(name="--out-user", metavar='', help='Outbound chap user name'), dict(name="--out-pass", metavar='', help='Outbound chap password'), ], ), dict( name='fs-create', help='Creates a file system', args=[ dict(name="--name", metavar='', help='name of the file system'), dict(size_opt), dict(pool_id_opt), ], ), dict( name='fs-delete', help='Delete a filesystem', args=[ dict(fs_id_opt) ], ), dict( name='fs-resize', help='Re-sizes a filesystem', args=[ dict(fs_id_opt), dict(name="--size", metavar="", help="New size. 
%s" % size_help), ], ), dict( name='fs-export', help='Export a filesystem via NFS.', args=[ dict(fs_id_opt), ], optional=[ dict(name="--exportpath", metavar='', help="NFS server export path. e.g. '/foo/bar'."), dict(name="--anonuid", metavar='', help='UID(User ID) to map to anonymous user', default=NfsExport.ANON_UID_GID_NA, type=long), dict(name="--anongid", metavar='', help='GID(Group ID) to map to anonymous user', default=NfsExport.ANON_UID_GID_NA, type=long), dict(name="--auth-type", metavar='', help='NFS client authentication type'), dict(name="--root-host", metavar='', help="The host/IP has root access.\n" "This is repeatable argument.", action='append', default=[]), dict(name="--ro-host", metavar='', help="The host/IP has readonly access.\n" "This is repeatable argument.", action='append', default=[]), dict(name="--rw-host", metavar='', help="The host/IP has readwrite access.\n" "This is repeatable argument.", action='append', default=[]), ], ), dict( name='fs-unexport', help='Remove an NFS export', args=[ dict(export_id_opt), ], ), dict( name='fs-clone', help='Creates a file system clone', args=[ dict(name="--src-fs", metavar='', help='The ID of existing source file system.'), dict(name="--dst-name", metavar='', help='The name for newly created destination file system.'), ], optional=[ dict(name="--backing-snapshot", metavar='', help='backing snapshot id'), ], ), dict( name='fs-snap-create', help='Creates a snapshot', args=[ dict(name="--name", metavar="", help='The human friendly name of new snapshot'), dict(fs_id_opt), ], ), dict( name='fs-snap-delete', help='Deletes a snapshot', args=[ dict(snap_id_opt), dict(fs_id_opt), # TODO: why we need filesystem ID? 
], ), dict( name='fs-snap-restore', help='Restores a FS or specified files to ' 'previous snapshot state', args=[ dict(snap_id_opt), dict(fs_id_opt), ], optional=[ dict(name="--file", metavar="", help="Only restore provided file\n" "Without this argument, all files will be restored\n" "This is a repeatable argument.", action='append', default=[]), dict(name="--fileas", metavar="", help="store restore file name to another name.\n" "This is a repeatable argument.", action='append', default=[]), ], ), dict( name='fs-dependants', help='Returns True if filesystem has a child ' 'dependency(clone/snapshot) exists', args=[ dict(fs_id_opt), ], optional=[ dict(name="--file", metavar="", action="append", default=[], help="For file check\nThis is a repeatable argument."), ], ), dict( name='fs-dependants-rm', help='Removes file system dependencies', args=[ dict(fs_id_opt), ], optional=[ dict(name="--file", action='append', default=[], help='File or files to remove dependencies for.\n' "This is a repeatable argument.",), ], ), dict( name='file-clone', help='Creates a clone of a file (thin provisioned)', args=[ dict(fs_id_opt), dict(name="--src", metavar="", help='source file to clone (relative path)\n' "This is a repeatable argument.",), dict(name="--dst", metavar="", help='Destination file (relative path)' ", this is a repeatable argument."), ], optional=[ dict(name="--backing-snapshot", help='backing snapshot id'), ], ), ) aliases = ( ['ls', 'list --type systems'], ['lp', 'list --type pools'], ['lv', 'list --type volumes'], ['ld', 'list --type disks'], ['la', 'list --type access_groups'], ['lf', 'list --type fs'], ['lt', 'list --type target_ports'], ['c', 'capabilities'], ['p', 'plugin-info'], ['vc', 'volume-create'], ['vrc', 'volume-raid-create'], ['vrcc', 'volume-raid-create-cap'], ['vd', 'volume-delete'], ['vr', 'volume-resize'], ['vm', 'volume-mask'], ['vu', 'volume-unmask'], ['ve', 'volume-enable'], ['vi', 'volume-disable'], ['ac', 'access-group-create'], ['aa', 
'access-group-add'], ['ar', 'access-group-remove'], ['ad', 'access-group-delete'], ['vri', 'volume-raid-info'], ['pmi', 'pool-member-info'], ) ## Class that encapsulates the command line arguments for lsmcli # Note: This class is used by lsmcli and any python plug-ins. class CmdLine: """ Command line interface class. """ ## # Warn of imminent data loss # @param deleting Indicate data will be lost vs. may be lost # (re-size) # @return True if operation confirmed, else False def confirm_prompt(self, deleting): """ Give the user a chance to bail. """ if not self.args.force: msg = "will" if deleting else "may" out("Warning: You are about to do an operation that %s cause data " "to be lost!\nPress [Y|y] to continue, any other key to abort" % msg) pressed = getch() if pressed.upper() == 'Y': return True else: out('Operation aborted!') return False else: return True ## # Tries to make the output better when it varies considerably from # plug-in to plug-in. # @param objects Data, first row is header all other data. 
def display_data(self, objects): display_all = False if len(objects) == 0: return display_way = DisplayData.DISPLAY_WAY_DEFAULT flag_with_header = True if self.args.sep: flag_with_header = False if self.args.header: flag_with_header = True if self.args.script: display_way = DisplayData.DISPLAY_WAY_SCRIPT DisplayData.display_data( objects, display_way=display_way, flag_human=self.args.human, flag_enum=self.args.enum, splitter=self.args.sep, flag_with_header=flag_with_header, flag_dsp_all_data=display_all) def display_available_plugins(self): d = [] sep = '<}{>' plugins = Client.available_plugins(sep) for p in plugins: desc, version = p.split(sep) d.append(PlugData(desc, version)) self.display_data(d) def handle_alias(self, args): cmd_arguments = args.cmd cmd_arguments.extend(self.unknown_args) new_args = self.parser.parse_args(cmd_arguments) new_args.func(new_args) ## All the command line arguments and options are created in this method def cli(self): """ Command line interface parameters """ parent_parser = ArgumentParser(add_help=False) parent_parser.add_argument( '-v', '--version', action='version', version="%s %s" % (sys.argv[0], VERSION)) parent_parser.add_argument( '-u', '--uri', action="store", type=str, metavar='', dest="uri", help='Uniform resource identifier (env LSMCLI_URI)') parent_parser.add_argument( '-P', '--prompt', action="store_true", dest="prompt", help='Prompt for password (env LSMCLI_PASSWORD)') parent_parser.add_argument( '-H', '--human', action="store_true", dest="human", help='Print sizes in human readable format\n' '(e.g., MiB, GiB, TiB)') parent_parser.add_argument( '-t', '--terse', action="store", dest="sep", metavar='', help='Print output in terse form with "SEP" ' 'as a record separator') parent_parser.add_argument( '-e', '--enum', action="store_true", dest="enum", default=False, help='Display enumerated types as numbers instead of text') parent_parser.add_argument( '-f', '--force', action="store_true", dest="force", default=False, 
help='Bypass confirmation prompt for data loss operations') parent_parser.add_argument( '-w', '--wait', action="store", type=int, dest="wait", default=30000, help="Command timeout value in ms (default = 30s)") parent_parser.add_argument( '--header', action="store_true", dest="header", help='Include the header with terse') parent_parser.add_argument( '-b', action="store_true", dest="async", default=False, help='Run the command async. Instead of waiting for completion.\n ' 'Command will exit(7) and job id written to stdout.') parent_parser.add_argument( '-s', '--script', action="store_true", dest="script", default=False, help='Displaying data in script friendly way.') parser = ArgumentParser( description='The libStorageMgmt command line interface.' ' Run %(prog)s -h for more on each command.', epilog='Copyright 2012-2015 Red Hat, Inc.\n' 'Please report bugs to ' '\n', formatter_class=RawTextHelpFormatter, parents=[parent_parser]) subparsers = parser.add_subparsers(metavar="command") # Walk the command list and add all of them to the parser for cmd in cmds: sub_parser = subparsers.add_parser( cmd['name'], help=cmd['help'], parents=[parent_parser], formatter_class=RawTextHelpFormatter) group = sub_parser.add_argument_group("cmd required arguments") for arg in cmd.get('args', []): name = arg['name'] del arg['name'] group.add_argument(name, required=True, **arg) group = sub_parser.add_argument_group("cmd optional arguments") for arg in cmd.get('optional', []): flags = arg['name'] del arg['name'] if not isinstance(flags, tuple): flags = (flags,) group.add_argument(*flags, **arg) sub_parser.set_defaults( func=getattr(self, cmd['name'].replace("-", "_"))) for alias in aliases: sub_parser = subparsers.add_parser( alias[0], help="Alias of '%s'" % alias[1], parents=[parent_parser], formatter_class=RawTextHelpFormatter, add_help=False) sub_parser.set_defaults( cmd=alias[1].split(" "), func=self.handle_alias) self.parser = parser known_agrs, self.unknown_args = 
parser.parse_known_args() return known_agrs ## Display the types of nfs client authentication that are supported. # @return None def display_nfs_client_authentication(self): """ Dump the supported nfs client authentication types """ if self.args.sep: out(self.args.sep.join(self.c.export_auth())) else: out(", ".join(self.c.export_auth())) ## Method that calls the appropriate method based on what the list type is # @param args Argparse argument object def list(self, args): search_key = None search_value = None if args.sys: search_key = 'system_id' search_value = args.sys if args.pool: search_key = 'pool_id' search_value = args.pool if args.vol: search_key = 'volume_id' search_value = args.vol if args.disk: search_key = 'disk_id' search_value = args.disk if args.ag: search_key = 'access_group_id' search_value = args.ag if args.fs: search_key = 'fs_id' search_value = args.ag if args.nfs_export: search_key = 'nfs_export_id' search_value = args.nfs_export if args.tgt: search_key = 'tgt_port_id' search_value = args.tgt if args.type == 'VOLUMES': if search_key == 'volume_id': search_key = 'id' if search_key == 'access_group_id': lsm_ag = _get_item(self.c.access_groups(), args.ag, "Access Group", raise_error=False) if lsm_ag: return self.display_data( self.c.volumes_accessible_by_access_group(lsm_ag)) else: return self.display_data([]) elif search_key and search_key not in Volume.SUPPORTED_SEARCH_KEYS: raise ArgError("Search key '%s' is not supported by " "volume listing." % search_key) self.display_data(self.c.volumes(search_key, search_value)) elif args.type == 'POOLS': if search_key == 'pool_id': search_key = 'id' if search_key and search_key not in Pool.SUPPORTED_SEARCH_KEYS: raise ArgError("Search key '%s' is not supported by " "pool listing." 
% search_key) self.display_data( self.c.pools(search_key, search_value)) elif args.type == 'FS': if search_key == 'fs_id': search_key = 'id' if search_key and \ search_key not in FileSystem.SUPPORTED_SEARCH_KEYS: raise ArgError("Search key '%s' is not supported by " "volume listing." % search_key) self.display_data(self.c.fs(search_key, search_value)) elif args.type == 'SNAPSHOTS': if args.fs is None: raise ArgError("--fs required") fs = _get_item(self.c.fs(), args.fs, 'File System') self.display_data(self.c.fs_snapshots(fs)) elif args.type == 'EXPORTS': if search_key == 'nfs_export_id': search_key = 'id' if search_key and \ search_key not in NfsExport.SUPPORTED_SEARCH_KEYS: raise ArgError("Search key '%s' is not supported by " "NFS Export listing" % search_key) self.display_data(self.c.exports(search_key, search_value)) elif args.type == 'NFS_CLIENT_AUTH': self.display_nfs_client_authentication() elif args.type == 'ACCESS_GROUPS': if search_key == 'access_group_id': search_key = 'id' if search_key == 'volume_id': lsm_vol = _get_item(self.c.volumes(), args.vol, "Volume", raise_error=False) if lsm_vol: return self.display_data( self.c.access_groups_granted_to_volume(lsm_vol)) else: return self.display_data([]) elif (search_key and search_key not in AccessGroup.SUPPORTED_SEARCH_KEYS): raise ArgError("Search key '%s' is not supported by " "Access Group listing" % search_key) self.display_data( self.c.access_groups(search_key, search_value)) elif args.type == 'SYSTEMS': if search_key: raise ArgError("System listing with search is not supported") self.display_data(self.c.systems()) elif args.type == 'DISKS': if search_key == 'disk_id': search_key = 'id' if search_key and search_key not in Disk.SUPPORTED_SEARCH_KEYS: raise ArgError("Search key '%s' is not supported by " "disk listing" % search_key) self.display_data( self.c.disks(search_key, search_value)) elif args.type == 'TARGET_PORTS': if search_key == 'tgt_port_id': search_key = 'id' if search_key and \ search_key 
not in TargetPort.SUPPORTED_SEARCH_KEYS: raise ArgError("Search key '%s' is not supported by " "target port listing" % search_key) self.display_data( self.c.target_ports(search_key, search_value)) elif args.type == 'PLUGINS': self.display_available_plugins() else: raise ArgError("unsupported listing type=%s" % args.type) ## Creates an access group. def access_group_create(self, args): system = _get_item(self.c.systems(), args.sys, "System") (init_id, init_type) = parse_convert_init(args.init) access_group = self.c.access_group_create(args.name, init_id, init_type, system) self.display_data([access_group]) def _add_rm_access_grp_init(self, args, op): lsm_ag = _get_item(self.c.access_groups(), args.ag, "Access Group") (init_id, init_type) = parse_convert_init(args.init) if op: return self.c.access_group_initiator_add(lsm_ag, init_id, init_type) else: return self.c.access_group_initiator_delete(lsm_ag, init_id, init_type) ## Adds an initiator from an access group def access_group_add(self, args): self.display_data([self._add_rm_access_grp_init(args, True)]) ## Removes an initiator from an access group def access_group_remove(self, args): self.display_data([self._add_rm_access_grp_init(args, False)]) def access_group_volumes(self, args): agl = self.c.access_groups() group = _get_item(agl, args.ag, "Access Group") vols = self.c.volumes_accessible_by_access_group(group) self.display_data(vols) def iscsi_chap(self, args): (init_id, init_type) = parse_convert_init(args.init) if init_type != AccessGroup.INIT_TYPE_ISCSI_IQN: raise ArgError("--init-id %s is not a valid iSCSI IQN" % args.init) self.c.iscsi_chap_auth(init_id, args.in_user, self.args.in_pass, self.args.out_user, self.args.out_pass) def volume_access_group(self, args): vol = _get_item(self.c.volumes(), args.vol, "Volume") groups = self.c.access_groups_granted_to_volume(vol) self.display_data(groups) ## Used to delete access group def access_group_delete(self, args): agl = self.c.access_groups() group = 
_get_item(agl, args.ag, "Access Group") return self.c.access_group_delete(group) ## Used to delete a file system def fs_delete(self, args): fs = _get_item(self.c.fs(), args.fs, "File System") if self.confirm_prompt(True): self._wait_for_it("fs-delete", self.c.fs_delete(fs), None) ## Used to create a file system def fs_create(self, args): p = _get_item(self.c.pools(), args.pool, "Pool") fs = self._wait_for_it("fs-create", *self.c.fs_create(p, args.name, self._size(args.size))) self.display_data([fs]) ## Used to resize a file system def fs_resize(self, args): fs = _get_item(self.c.fs(), args.fs, "File System") size = self._size(args.size) if self.confirm_prompt(False): fs = self._wait_for_it("fs-resize", *self.c.fs_resize(fs, size)) self.display_data([fs]) ## Used to clone a file system def fs_clone(self, args): src_fs = _get_item( self.c.fs(), args.src_fs, "Source File System") ss = None if args.backing_snapshot: #go get the snapshot ss = _get_item(self.c.fs_snapshots(src_fs), args.backing_snapshot, "Snapshot") fs = self._wait_for_it( "fs_clone", *self.c.fs_clone(src_fs, args.dst_name, ss)) self.display_data([fs]) ## Used to clone a file(s) def file_clone(self, args): fs = _get_item(self.c.fs(), args.fs, "File System") if self.args.backing_snapshot: #go get the snapshot ss = _get_item(self.c.fs_snapshots(fs), args.backing_snapshot, "Snapshot") else: ss = None self._wait_for_it( "fs_file_clone", self.c.fs_file_clone(fs, args.src, args.dst, ss), None) ##Converts a size parameter into the appropriate number of bytes # @param s Size to convert to bytes handles B, K, M, G, T, P postfix # @return Size in bytes @staticmethod def _size(s): size_bytes = size_human_2_size_bytes(s) if size_bytes <= 0: raise ArgError("Incorrect size argument format: '%s'" % s) return size_bytes def _cp(self, cap, val): if self.args.sep is not None: s = self.args.sep else: s = ':' if val: v = "SUPPORTED" else: v = "UNSUPPORTED" out("%s%s%s" % (cap, s, v)) def capabilities(self, args): s = 
_get_item(self.c.systems(), args.sys, "System") cap = self.c.capabilities(s) sup_caps = sorted(cap.get_supported().values()) all_caps = sorted(cap.get_supported(True).values()) sep = DisplayData.DEFAULT_SPLITTER if self.args.sep is not None: sep = self.args.sep cap_data = OrderedDict() # Show support capabilities first for v in sup_caps: cap_data[v] = 'SUPPORTED' for v in all_caps: if v not in sup_caps: cap_data[v] = 'UNSUPPORTED' DisplayData.display_data_script_way([cap_data], sep) def plugin_info(self, args): desc, version = self.c.plugin_info() if args.sep: out("%s%s%s" % (desc, args.sep, version)) else: out("Description: %s Version: %s" % (desc, version)) ## Creates a volume def volume_create(self, args): #Get pool p = _get_item(self.c.pools(), args.pool, "Pool") vol = self._wait_for_it( "volume-create", *self.c.volume_create( p, args.name, self._size(args.size), vol_provision_str_to_type(args.provisioning))) self.display_data([vol]) ## Creates a snapshot def fs_snap_create(self, args): #Get fs fs = _get_item(self.c.fs(), args.fs, "File System") ss = self._wait_for_it("snapshot-create", *self.c.fs_snapshot_create( fs, args.name)) self.display_data([ss]) ## Restores a snap shot def fs_snap_restore(self, args): #Get snapshot fs = _get_item(self.c.fs(), args.fs, "File System") ss = _get_item(self.c.fs_snapshots(fs), args.snap, "Snapshot") flag_all_files = True if self.args.file: flag_all_files = False if self.args.fileas: if len(self.args.file) != len(self.args.fileas): raise ArgError( "number of --file not equal to --fileas") if self.confirm_prompt(True): self._wait_for_it( 'fs-snap-restore', self.c.fs_snapshot_restore( fs, ss, self.args.file, self.args.fileas, flag_all_files), None) ## Deletes a volume def volume_delete(self, args): v = _get_item(self.c.volumes(), args.vol, "Volume") if self.confirm_prompt(True): self._wait_for_it("volume-delete", self.c.volume_delete(v), None) ## Deletes a snap shot def fs_snap_delete(self, args): fs = _get_item(self.c.fs(), 
args.fs, "File System") ss = _get_item(self.c.fs_snapshots(fs), args.snap, "Snapshot") if self.confirm_prompt(True): self._wait_for_it("fs_snap_delete", self.c.fs_snapshot_delete(fs, ss), None) ## Waits for an operation to complete by polling for the status of the # operations. # @param msg Message to display if this job fails # @param job The job id to wait on # @param item The item that could be available now if there is no job def _wait_for_it(self, msg, job, item): if not job: return item else: #If a user doesn't want to wait, return the job id to stdout #and exit with job in progress if self.args.async: out(job) self.shutdown(ErrorNumber.JOB_STARTED) while True: (s, percent, item) = self.c.job_status(job) if s == JobStatus.INPROGRESS: #Add an option to spit out progress? #print "%s - Percent %s complete" % (job, percent) time.sleep(0.25) elif s == JobStatus.COMPLETE: self.c.job_free(job) return item else: #Something better to do here? raise ArgError(msg + " job error code= " + str(s)) ## Retrieves the status of the specified job def job_status(self, args): (s, percent, item) = self.c.job_status(args.job) if s == JobStatus.COMPLETE: if item: self.display_data([item]) self.c.job_free(args.job) else: out(str(percent)) self.shutdown(ErrorNumber.JOB_STARTED) ## Replicates a volume def volume_replicate(self, args): p = None if args.pool: p = _get_item(self.c.pools(), args.pool, "Pool") v = _get_item(self.c.volumes(), args.vol, "Volume") rep_type = vol_rep_type_str_to_type(args.rep_type) if rep_type == Volume.REPLICATE_UNKNOWN: raise ArgError("invalid replication type= %s" % rep_type) vol = self._wait_for_it( "replicate volume", *self.c.volume_replicate(p, rep_type, v, args.name)) self.display_data([vol]) ## Replicates a range of a volume def volume_replicate_range(self, args): src = _get_item(self.c.volumes(), args.src_vol, "Source Volume") dst = _get_item(self.c.volumes(), args.dst_vol, "Destination Volume") rep_type = vol_rep_type_str_to_type(args.rep_type) if 
rep_type == Volume.REPLICATE_UNKNOWN: raise ArgError("invalid replication type= %s" % rep_type) src_starts = args.src_start dst_starts = args.dst_start counts = args.count if not len(src_starts) \ or not (len(src_starts) == len(dst_starts) == len(counts)): raise ArgError("Differing numbers of src_start, dest_start, " "and count parameters") ranges = [] for b in range(len(src_starts)): ranges.append(BlockRange(src_starts[b], dst_starts[b], counts[b])) if self.confirm_prompt(False): self.c.volume_replicate_range(rep_type, src, dst, ranges) ## # Returns the block size in bytes for each block represented in # volume_replicate_range def volume_replicate_range_block_size(self, args): s = _get_item(self.c.systems(), args.sys, "System") out(self.c.volume_replicate_range_block_size(s)) def volume_mask(self, args): vol = _get_item(self.c.volumes(), args.vol, 'Volume') ag = _get_item(self.c.access_groups(), args.ag, 'Access Group') self.c.volume_mask(ag, vol) def volume_unmask(self, args): ag = _get_item(self.c.access_groups(), args.ag, "Access Group") vol = _get_item(self.c.volumes(), args.vol, "Volume") return self.c.volume_unmask(ag, vol) ## Re-sizes a volume def volume_resize(self, args): v = _get_item(self.c.volumes(), args.vol, "Volume") size = self._size(args.size) if self.confirm_prompt(False): vol = self._wait_for_it("resize", *self.c.volume_resize(v, size)) self.display_data([vol]) ## Enable a volume def volume_enable(self, args): v = _get_item(self.c.volumes(), args.vol, "Volume") self.c.volume_enable(v) ## Disable a volume def volume_disable(self, args): v = _get_item(self.c.volumes(), args.vol, "Volume") self.c.volume_disable(v) ## Removes a nfs export def fs_unexport(self, args): export = _get_item(self.c.exports(), args.export, "NFS Export") self.c.export_remove(export) ## Exports a file system as a NFS export def fs_export(self, args): fs = _get_item(self.c.fs(), args.fs, "File System") # Check to see if we have some type of access specified if 
len(args.rw_host) == 0 \ and len(args.ro_host) == 0: raise ArgError(" please specify --ro-host or --rw-host") export = self.c.export_fs( fs.id, args.exportpath, args.root_host, args.rw_host, args.ro_host, args.anonuid, args.anongid, args.auth_type, None) self.display_data([export]) ## Displays volume dependants. def volume_dependants(self, args): v = _get_item(self.c.volumes(), args.vol, "Volume") rc = self.c.volume_child_dependency(v) out(rc) ## Removes volume dependants. def volume_dependants_rm(self, args): v = _get_item(self.c.volumes(), args.vol, "Volume") self._wait_for_it("volume-dependant-rm", self.c.volume_child_dependency_rm(v), None) def volume_raid_info(self, args): lsm_vol = _get_item(self.c.volumes(), args.vol, "Volume") self.display_data( [ VolumeRAIDInfo( lsm_vol.id, *self.c.volume_raid_info(lsm_vol))]) def pool_member_info(self, args): lsm_pool = _get_item(self.c.pools(), args.pool, "Pool") self.display_data( [ PoolRAIDInfo( lsm_pool.id, *self.c.pool_member_info(lsm_pool))]) def volume_raid_create(self, args): raid_type = VolumeRAIDInfo.raid_type_str_to_lsm(args.raid_type) all_lsm_disks = self.c.disks() lsm_disks = [d for d in all_lsm_disks if d.id in args.disk] if len(lsm_disks) != len(args.disk): raise LsmError( ErrorNumber.NOT_FOUND_DISK, "Disk ID %s not found" % ', '.join(set(args.disk) - set(d.id for d in all_lsm_disks))) busy_disks = [d.id for d in lsm_disks if not d.status & Disk.STATUS_FREE] if len(busy_disks) >= 1: raise LsmError( ErrorNumber.DISK_NOT_FREE, "Disk %s is not free" % ", ".join(busy_disks)) if args.strip_size: strip_size = size_human_2_size_bytes(args.strip_size) else: strip_size = Volume.VCR_STRIP_SIZE_DEFAULT self.display_data([ self.c.volume_raid_create( args.name, raid_type, lsm_disks, strip_size)]) def volume_raid_create_cap(self, args): lsm_sys = _get_item(self.c.systems(), args.sys, "System") self.display_data([ VcrCap(lsm_sys.id, *self.c.volume_raid_create_cap_get(lsm_sys))]) ## Displays file system dependants def 
fs_dependants(self, args): fs = _get_item(self.c.fs(), args.fs, "File System") rc = self.c.fs_child_dependency(fs, args.file) out(rc) ## Removes file system dependants def fs_dependants_rm(self, args): fs = _get_item(self.c.fs(), args.fs, "File System") self._wait_for_it("fs-dependants-rm", self.c.fs_child_dependency_rm(fs, args.file), None) def _read_configfile(self): """ Set uri from config file. Will be overridden by cmdline option or env var if present. """ allowed_config_options = ("uri",) config_path = os.path.expanduser("~") + "/.lsmcli" if not os.path.exists(config_path): return with open(config_path) as f: for line in f: if line.lstrip().startswith("#"): continue try: name, val = [x.strip() for x in line.split("=", 1)] if name in allowed_config_options: setattr(self, name, val) except ValueError: pass ## Class constructor. def __init__(self): self.uri = None self.c = None self.parser = None self.unknown_args = None self.args = self.cli() self.cleanup = None self.tmo = int(self.args.wait) if not self.tmo or self.tmo < 0: raise ArgError("[-w|--wait] requires a non-zero positive integer") self._read_configfile() if os.getenv('LSMCLI_URI') is not None: self.uri = os.getenv('LSMCLI_URI') self.password = os.getenv('LSMCLI_PASSWORD') if self.args.uri is not None: self.uri = self.args.uri if self.uri is None: # We need a valid plug-in to instantiate even if all we are trying # to do is list the plug-ins at the moment to keep that code # the same in all cases, even though it isn't technically # required for the client library (static method) # TODO: Make this not necessary. if ('type' in self.args and self.args.type == "PLUGINS"): self.uri = "sim://" self.password = None else: raise ArgError("--uri missing or export LSMCLI_URI") # Lastly get the password if requested. 
if self.args.prompt: self.password = getpass.getpass() if self.password is not None: #Check for username u = uri_parse(self.uri) if u['username'] is None: raise ArgError("password specified with no user name in uri") ## Does appropriate clean-up # @param ec The exit code def shutdown(self, ec=None): if self.cleanup: self.cleanup() if ec: sys.exit(ec) ## Process the specified command # @param cli The object instance to invoke methods on. def process(self, cli=None): """ Process the parsed command. """ if cli: #Directly invoking code though a wrapper to catch unsupported #operations. self.c = Proxy(cli()) self.c.plugin_register(self.uri, self.password, self.tmo) self.cleanup = self.c.plugin_unregister else: #Going across the ipc pipe self.c = Proxy(Client(self.uri, self.password, self.tmo)) if os.getenv('LSM_DEBUG_PLUGIN'): raw_input( "Attach debugger to plug-in, press when ready...") self.cleanup = self.c.close self.args.func(self.args) self.shutdown() libstoragemgmt-1.2.3/tools/lsmcli/Makefile.am0000664000175000017500000000020012537546123016137 00000000000000dist_bin_SCRIPTS = lsmcli lsmclidir = $(pythondir)/lsm/lsmcli lsmcli_PYTHON = \ data_display.py \ cmdline.py \ __init__.py libstoragemgmt-1.2.3/tools/lsmcli/__init__.py0000664000175000017500000000004512537546123016223 00000000000000from cmdline import cmd_line_wrapper libstoragemgmt-1.2.3/tools/lsmcli/Makefile.in0000664000175000017500000004345512542455445016175 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : 
PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = tools/lsmcli DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(dist_bin_SCRIPTS) $(lsmcli_PYTHON) \ $(top_srcdir)/build-aux/py-compile ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(bindir)" "$(DESTDIR)$(lsmclidir)" SCRIPTS = $(dist_bin_SCRIPTS) AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__py_compile = PYTHON=$(PYTHON) $(SHELL) $(py_compile) am__pep3147_tweak = \ sed -e 's|\.py$$||' -e 's|[^/]*$$|__pycache__/&.*.py|' py_compile = $(top_srcdir)/build-aux/py-compile am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = 
@LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = 
@builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ dist_bin_SCRIPTS = lsmcli lsmclidir = $(pythondir)/lsm/lsmcli lsmcli_PYTHON = \ data_display.py \ cmdline.py \ __init__.py all: all-am .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu tools/lsmcli/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu tools/lsmcli/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): install-dist_binSCRIPTS: $(dist_bin_SCRIPTS) @$(NORMAL_INSTALL) @list='$(dist_bin_SCRIPTS)'; test -n "$(bindir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \ $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \ done | \ sed -e 'p;s,.*/,,;n' \ -e 'h;s|.*|.|' \ -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \ $(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \ { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \ if ($$2 == $$4) { files[d] = files[d] " " $$1; \ if (++n[d] == $(am__install_max)) { \ print "f", d, files[d]; n[d] = 0; files[d] = "" } } \ else { print "f", d "/" $$4, $$1 } } \ END { for (d in files) print "f", d, files[d] }' | \ while read type dir files; do \ if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \ test -z "$$files" || { \ echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(bindir)$$dir'"; \ $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \ } \ ; done uninstall-dist_binSCRIPTS: @$(NORMAL_UNINSTALL) @list='$(dist_bin_SCRIPTS)'; test -n "$(bindir)" || exit 0; \ files=`for p in $$list; do echo "$$p"; done | \ sed -e 's,.*/,,;$(transform)'`; \ 
dir='$(DESTDIR)$(bindir)'; $(am__uninstall_files_from_dir) mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-lsmcliPYTHON: $(lsmcli_PYTHON) @$(NORMAL_INSTALL) @list='$(lsmcli_PYTHON)'; dlist=; list2=; test -n "$(lsmclidir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(lsmclidir)'"; \ $(MKDIR_P) "$(DESTDIR)$(lsmclidir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then b=; else b="$(srcdir)/"; fi; \ if test -f $$b$$p; then \ $(am__strip_dir) \ dlist="$$dlist $$f"; \ list2="$$list2 $$b$$p"; \ else :; fi; \ done; \ for file in $$list2; do echo $$file; done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(lsmclidir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(lsmclidir)" || exit $$?; \ done || exit $$?; \ if test -n "$$dlist"; then \ $(am__py_compile) --destdir "$(DESTDIR)" \ --basedir "$(lsmclidir)" $$dlist; \ else :; fi uninstall-lsmcliPYTHON: @$(NORMAL_UNINSTALL) @list='$(lsmcli_PYTHON)'; test -n "$(lsmclidir)" || list=; \ py_files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ test -n "$$py_files" || exit 0; \ dir='$(DESTDIR)$(lsmclidir)'; \ pyc_files=`echo "$$py_files" | sed 's|$$|c|'`; \ pyo_files=`echo "$$py_files" | sed 's|$$|o|'`; \ py_files_pep3147=`echo "$$py_files" | $(am__pep3147_tweak)`; \ echo "$$py_files_pep3147";\ pyc_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|c|'`; \ pyo_files_pep3147=`echo "$$py_files_pep3147" | sed 's|$$|o|'`; \ st=0; \ for files in \ "$$py_files" \ "$$pyc_files" \ "$$pyo_files" \ "$$pyc_files_pep3147" \ "$$pyo_files_pep3147" \ ; do \ $(am__uninstall_files_from_dir) || st=$$?; \ done; \ exit $$st tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e 
"s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(SCRIPTS) installdirs: for dir in "$(DESTDIR)$(bindir)" "$(DESTDIR)$(lsmclidir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-lsmcliPYTHON install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-dist_binSCRIPTS install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-dist_binSCRIPTS uninstall-lsmcliPYTHON .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am \ install-dist_binSCRIPTS install-dvi install-dvi-am \ install-exec install-exec-am install-html install-html-am \ install-info install-info-am install-lsmcliPYTHON install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ maintainer-clean maintainer-clean-generic mostlyclean \ mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ tags-am uninstall uninstall-am uninstall-dist_binSCRIPTS \ uninstall-lsmcliPYTHON # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/tools/lsmcli/lsmcli0000775000175000017500000000166212537737032015332 00000000000000#!/usr/bin/env python2 # Copyright (C) 2012 Red Hat, Inc. 
# This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . # # Author: tasleson from lsm.lsmcli import cmd_line_wrapper ## Command line interface for libStorageMgmt. # This is contained in a separate class which can be shared across all the py # plug-ins. if __name__ == '__main__': cmd_line_wrapper() libstoragemgmt-1.2.3/tools/lsmcli/data_display.py0000664000175000017500000006505412537737032017136 00000000000000# Copyright (C) 2014 Red Hat, Inc. # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; If not, see . 
# # Author: Gris Ge import sys try: from collections import OrderedDict except ImportError: # python 2.6 or earlier, use backport from ordereddict import OrderedDict from datetime import datetime from lsm import (size_bytes_2_size_human, LsmError, ErrorNumber, System, Pool, Disk, Volume, AccessGroup, FileSystem, FsSnapshot, NfsExport, TargetPort) BIT_MAP_STRING_SPLITTER = ',' ## Users are reporting errors with broken pipe when piping output # to another program. This appears to be related to this issue: # http://bugs.python.org/issue11380 # Unable to reproduce, but hopefully this will address it. # @param msg The message to be written to stdout def out(msg): try: sys.stdout.write(str(msg)) sys.stdout.write("\n") sys.stdout.flush() except IOError: sys.exit(1) def _bit_map_to_str(bit_map, conv_dict): rc = [] bit_map = int(bit_map) for cur_enum in conv_dict.keys(): if cur_enum & bit_map: rc.append(conv_dict[cur_enum]) # If there are no bits set we really don't need a string if bit_map != 0 and len(rc) == 0: return 'Unknown(%s)' % hex(bit_map) return BIT_MAP_STRING_SPLITTER.join(rc) def _enum_type_to_str(int_type, conv_dict): rc = '' int_type = int(int_type) if int_type in conv_dict.keys(): return conv_dict[int_type] return 'Unknown(%d)' % int_type def _str_to_enum(type_str, conv_dict): keys = [k for k, v in conv_dict.items() if v.lower() == type_str.lower()] if len(keys) > 0: return keys[0] raise LsmError(ErrorNumber.INVALID_ARGUMENT, "Failed to convert %s to lsm type" % type_str) _SYSTEM_STATUS_CONV = { System.STATUS_UNKNOWN: 'Unknown', System.STATUS_OK: 'OK', System.STATUS_ERROR: 'Error', System.STATUS_DEGRADED: 'Degraded', System.STATUS_PREDICTIVE_FAILURE: 'Predictive failure', System.STATUS_OTHER: 'Other', } def system_status_to_str(system_status): return _bit_map_to_str(system_status, _SYSTEM_STATUS_CONV) _POOL_STATUS_CONV = { Pool.STATUS_UNKNOWN: 'Unknown', Pool.STATUS_OK: 'OK', Pool.STATUS_OTHER: 'Other', Pool.STATUS_DEGRADED: 'Degraded', Pool.STATUS_ERROR: 
'Error', Pool.STATUS_STOPPED: 'Stopped', Pool.STATUS_RECONSTRUCTING: 'Reconstructing', Pool.STATUS_VERIFYING: 'Verifying', Pool.STATUS_INITIALIZING: 'Initializing', Pool.STATUS_GROWING: 'Growing', } def pool_status_to_str(pool_status): return _bit_map_to_str(pool_status, _POOL_STATUS_CONV) _POOL_ELEMENT_TYPE_CONV = { Pool.ELEMENT_TYPE_POOL: 'POOL', Pool.ELEMENT_TYPE_VOLUME: 'VOLUME', Pool.ELEMENT_TYPE_VOLUME_THIN: 'VOLUME_THIN', Pool.ELEMENT_TYPE_VOLUME_FULL: 'VOLUME_FULL', Pool.ELEMENT_TYPE_FS: 'FS', Pool.ELEMENT_TYPE_SYS_RESERVED: 'SYSTEM_RESERVED', Pool.ELEMENT_TYPE_DELTA: "DELTA", } _POOL_UNSUPPORTED_ACTION_CONV = { Pool.UNSUPPORTED_VOLUME_GROW: "Volume Grow", Pool.UNSUPPORTED_VOLUME_SHRINK: "Volume Shrink" } def pool_element_type_to_str(element_type): return _bit_map_to_str(element_type, _POOL_ELEMENT_TYPE_CONV) def pool_unsupported_actions_to_str(unsupported_actions): return _bit_map_to_str(unsupported_actions, _POOL_UNSUPPORTED_ACTION_CONV) _VOL_PROVISION_CONV = { Volume.PROVISION_DEFAULT: 'DEFAULT', Volume.PROVISION_FULL: 'FULL', Volume.PROVISION_THIN: 'THIN', Volume.PROVISION_UNKNOWN: 'UNKNOWN', } def vol_provision_str_to_type(vol_provision_str): return _str_to_enum(vol_provision_str, _VOL_PROVISION_CONV) _VOL_ADMIN_STATE_CONV = { Volume.ADMIN_STATE_DISABLED: 'Yes', Volume.ADMIN_STATE_ENABLED: 'No', } def vol_admin_state_to_str(vol_admin_state): return _enum_type_to_str(vol_admin_state, _VOL_ADMIN_STATE_CONV) _VOL_REP_TYPE_CONV = { Volume.REPLICATE_CLONE: 'CLONE', Volume.REPLICATE_COPY: 'COPY', Volume.REPLICATE_MIRROR_SYNC: 'MIRROR_SYNC', Volume.REPLICATE_MIRROR_ASYNC: 'MIRROR_ASYNC', Volume.REPLICATE_UNKNOWN: 'UNKNOWN', } def vol_rep_type_str_to_type(vol_rep_type_str): return _str_to_enum(vol_rep_type_str, _VOL_REP_TYPE_CONV) _DISK_TYPE_CONV = { Disk.TYPE_UNKNOWN: 'UNKNOWN', Disk.TYPE_OTHER: 'Other', Disk.TYPE_ATA: 'ATA', Disk.TYPE_SATA: 'SATA', Disk.TYPE_SAS: 'SAS', Disk.TYPE_FC: 'FC', Disk.TYPE_SOP: 'SCSI Over PCI-E(SSD)', Disk.TYPE_SCSI: 'SCSI', 
Disk.TYPE_NL_SAS: 'NL_SAS', Disk.TYPE_HDD: 'HDD', Disk.TYPE_SSD: 'SSD', Disk.TYPE_HYBRID: 'Hybrid', Disk.TYPE_LUN: 'Remote LUN', } def disk_type_to_str(disk_type): return _enum_type_to_str(disk_type, _DISK_TYPE_CONV) _DISK_STATUS_CONV = { Disk.STATUS_UNKNOWN: 'Unknown', Disk.STATUS_OK: 'OK', Disk.STATUS_OTHER: 'Other', Disk.STATUS_PREDICTIVE_FAILURE: 'Predictive failure', Disk.STATUS_ERROR: 'Error', Disk.STATUS_REMOVED: 'Removed', Disk.STATUS_STARTING: 'Starting', Disk.STATUS_STOPPING: 'Stopping', Disk.STATUS_STOPPED: 'Stopped', Disk.STATUS_INITIALIZING: 'Initializing', Disk.STATUS_MAINTENANCE_MODE: 'Maintenance', Disk.STATUS_SPARE_DISK: 'Spare', Disk.STATUS_RECONSTRUCT: 'Reconstruct', Disk.STATUS_FREE: 'Free', } def disk_status_to_str(disk_status): return _bit_map_to_str(disk_status, _DISK_STATUS_CONV) _AG_INIT_TYPE_CONV = { AccessGroup.INIT_TYPE_UNKNOWN: 'Unknown', AccessGroup.INIT_TYPE_OTHER: 'Other', AccessGroup.INIT_TYPE_WWPN: 'WWPN', AccessGroup.INIT_TYPE_ISCSI_IQN: 'iSCSI', AccessGroup.INIT_TYPE_ISCSI_WWPN_MIXED: 'iSCSI/WWPN Mixed', } def ag_init_type_to_str(init_type): return _enum_type_to_str(init_type, _AG_INIT_TYPE_CONV) def ag_init_type_str_to_lsm(init_type_str): return _str_to_enum(init_type_str, _AG_INIT_TYPE_CONV) _TGT_PORT_TYPE_CONV = { TargetPort.TYPE_OTHER: 'Other', TargetPort.TYPE_FC: 'FC', TargetPort.TYPE_FCOE: 'FCoE', TargetPort.TYPE_ISCSI: 'iSCSI', } def tgt_port_type_to_str(port_type): return _enum_type_to_str(port_type, _TGT_PORT_TYPE_CONV) class PlugData(object): def __init__(self, description, plugin_version): self.desc = description self.version = plugin_version class VolumeRAIDInfo(object): _RAID_TYPE_MAP = { Volume.RAID_TYPE_RAID0: 'RAID0', Volume.RAID_TYPE_RAID1: 'RAID1', Volume.RAID_TYPE_RAID3: 'RAID3', Volume.RAID_TYPE_RAID4: 'RAID4', Volume.RAID_TYPE_RAID5: 'RAID5', Volume.RAID_TYPE_RAID6: 'RAID6', Volume.RAID_TYPE_RAID10: 'RAID10', Volume.RAID_TYPE_RAID15: 'RAID15', Volume.RAID_TYPE_RAID16: 'RAID16', Volume.RAID_TYPE_RAID50: 
'RAID50', Volume.RAID_TYPE_RAID60: 'RAID60', Volume.RAID_TYPE_RAID51: 'RAID51', Volume.RAID_TYPE_RAID61: 'RAID61', Volume.RAID_TYPE_JBOD: 'JBOD', Volume.RAID_TYPE_MIXED: 'MIXED', Volume.RAID_TYPE_OTHER: 'OTHER', Volume.RAID_TYPE_UNKNOWN: 'UNKNOWN', } VOL_CREATE_RAID_TYPES_STR = [ 'RAID0', 'RAID1', 'RAID5', 'RAID6', 'RAID10', 'RAID50', 'RAID60'] def __init__(self, vol_id, raid_type, strip_size, disk_count, min_io_size, opt_io_size): self.vol_id = vol_id self.raid_type = raid_type self.strip_size = strip_size self.disk_count = disk_count self.min_io_size = min_io_size self.opt_io_size = opt_io_size @staticmethod def raid_type_to_str(raid_type): return _enum_type_to_str(raid_type, VolumeRAIDInfo._RAID_TYPE_MAP) @staticmethod def raid_type_str_to_lsm(raid_type_str): return _str_to_enum(raid_type_str, VolumeRAIDInfo._RAID_TYPE_MAP) class PoolRAIDInfo(object): _MEMBER_TYPE_MAP = { Pool.MEMBER_TYPE_UNKNOWN: 'Unknown', Pool.MEMBER_TYPE_OTHER: 'Unknown', Pool.MEMBER_TYPE_POOL: 'Pool', Pool.MEMBER_TYPE_DISK: 'Disk', } def __init__(self, pool_id, raid_type, member_type, member_ids): self.pool_id = pool_id self.raid_type = raid_type self.member_type = member_type self.member_ids = member_ids @staticmethod def member_type_to_str(member_type): return _enum_type_to_str( member_type, PoolRAIDInfo._MEMBER_TYPE_MAP) class VcrCap(object): def __init__(self, system_id, raid_types, strip_sizes): self.system_id = system_id self.raid_types = raid_types self.strip_sizes = strip_sizes class DisplayData(object): def __init__(self): pass DISPLAY_WAY_COLUMN = 0 DISPLAY_WAY_SCRIPT = 1 DISPLAY_WAY_DEFAULT = DISPLAY_WAY_COLUMN DEFAULT_SPLITTER = ' | ' VALUE_CONVERT = {} # lsm.System SYSTEM_HEADER = OrderedDict() SYSTEM_HEADER['id'] = 'ID' SYSTEM_HEADER['name'] = 'Name' SYSTEM_HEADER['status'] = 'Status' SYSTEM_HEADER['status_info'] = 'Info' SYSTEM_COLUMN_SKIP_KEYS = [] # XXX_COLUMN_SKIP_KEYS contain a list of property should be skipped when # displaying in column way. 
SYSTEM_VALUE_CONV_ENUM = { 'status': system_status_to_str, } SYSTEM_VALUE_CONV_HUMAN = [] VALUE_CONVERT[System] = { 'headers': SYSTEM_HEADER, 'column_skip_keys': SYSTEM_COLUMN_SKIP_KEYS, 'value_conv_enum': SYSTEM_VALUE_CONV_ENUM, 'value_conv_human': SYSTEM_VALUE_CONV_HUMAN, } PLUG_DATA_HEADER = OrderedDict() PLUG_DATA_HEADER['desc'] = 'Description' PLUG_DATA_HEADER['version'] = 'Version' PLUG_DATA_COLUMN_SKIP_KEYS = [] PLUG_DATA_VALUE_CONV_ENUM = {} PLUG_DATA_VALUE_CONV_HUMAN = [] VALUE_CONVERT[PlugData] = { 'headers': PLUG_DATA_HEADER, 'column_skip_keys': PLUG_DATA_COLUMN_SKIP_KEYS, 'value_conv_enum': PLUG_DATA_VALUE_CONV_ENUM, 'value_conv_human': PLUG_DATA_VALUE_CONV_HUMAN, } # lsm.Pool POOL_HEADER = OrderedDict() POOL_HEADER['id'] = 'ID' POOL_HEADER['name'] = 'Name' POOL_HEADER['element_type'] = 'Element Type' POOL_HEADER['unsupported_actions'] = 'Does not support' POOL_HEADER['total_space'] = 'Total Space' POOL_HEADER['free_space'] = 'Free Space' POOL_HEADER['status'] = 'Status' POOL_HEADER['status_info'] = 'Info' POOL_HEADER['system_id'] = 'System ID' POOL_COLUMN_SKIP_KEYS = ['unsupported_actions'] POOL_VALUE_CONV_ENUM = { 'status': pool_status_to_str, 'element_type': pool_element_type_to_str, 'unsupported_actions': pool_unsupported_actions_to_str } POOL_VALUE_CONV_HUMAN = ['total_space', 'free_space'] VALUE_CONVERT[Pool] = { 'headers': POOL_HEADER, 'column_skip_keys': POOL_COLUMN_SKIP_KEYS, 'value_conv_enum': POOL_VALUE_CONV_ENUM, 'value_conv_human': POOL_VALUE_CONV_HUMAN, } # lsm.Volume VOL_HEADER = OrderedDict() VOL_HEADER['id'] = 'ID' VOL_HEADER['name'] = 'Name' VOL_HEADER['vpd83'] = 'SCSI VPD 0x83' VOL_HEADER['block_size'] = 'Block Size' VOL_HEADER['num_of_blocks'] = 'Block Count' VOL_HEADER['size_bytes'] = 'Size' VOL_HEADER['admin_state'] = 'Disabled' VOL_HEADER['pool_id'] = 'Pool ID' VOL_HEADER['system_id'] = 'System ID' VOL_COLUMN_SKIP_KEYS = ['block_size', 'num_of_blocks'] VOL_VALUE_CONV_ENUM = { 'admin_state': vol_admin_state_to_str } 
VOL_VALUE_CONV_HUMAN = ['size_bytes', 'block_size'] VALUE_CONVERT[Volume] = { 'headers': VOL_HEADER, 'column_skip_keys': VOL_COLUMN_SKIP_KEYS, 'value_conv_enum': VOL_VALUE_CONV_ENUM, 'value_conv_human': VOL_VALUE_CONV_HUMAN, } # lsm.Disk DISK_HEADER = OrderedDict() DISK_HEADER['id'] = 'ID' DISK_HEADER['name'] = 'Name' DISK_HEADER['disk_type'] = 'Type' DISK_HEADER['block_size'] = 'Block Size' DISK_HEADER['num_of_blocks'] = 'Block Count' DISK_HEADER['size_bytes'] = 'Size' DISK_HEADER['status'] = 'Status' DISK_HEADER['system_id'] = 'System ID' DISK_COLUMN_SKIP_KEYS = ['block_size', 'num_of_blocks'] DISK_VALUE_CONV_ENUM = { 'status': disk_status_to_str, 'disk_type': disk_type_to_str, } DISK_VALUE_CONV_HUMAN = ['size_bytes', 'block_size'] VALUE_CONVERT[Disk] = { 'headers': DISK_HEADER, 'column_skip_keys': DISK_COLUMN_SKIP_KEYS, 'value_conv_enum': DISK_VALUE_CONV_ENUM, 'value_conv_human': DISK_VALUE_CONV_HUMAN, } # lsm.AccessGroup AG_HEADER = OrderedDict() AG_HEADER['id'] = 'ID' AG_HEADER['name'] = 'Name' AG_HEADER['init_ids'] = 'Initiator IDs' AG_HEADER['init_type'] = 'Type' AG_HEADER['system_id'] = 'System ID' AG_COLUMN_SKIP_KEYS = ['init_type'] AG_VALUE_CONV_ENUM = { 'init_type': ag_init_type_to_str, } AG_VALUE_CONV_HUMAN = [] VALUE_CONVERT[AccessGroup] = { 'headers': AG_HEADER, 'column_skip_keys': AG_COLUMN_SKIP_KEYS, 'value_conv_enum': AG_VALUE_CONV_ENUM, 'value_conv_human': AG_VALUE_CONV_HUMAN, } # lsm.FileSystem FS_HEADER = OrderedDict() FS_HEADER['id'] = 'ID' FS_HEADER['name'] = 'Name' FS_HEADER['total_space'] = 'Total Space' FS_HEADER['free_space'] = 'Free Space' FS_HEADER['pool_id'] = 'Pool ID' FS_HEADER['system_id'] = 'System ID' FS_COLUMN_SKIP_KEYS = [] FS_VALUE_CONV_ENUM = { } FS_VALUE_CONV_HUMAN = ['total_space', 'free_space'] VALUE_CONVERT[FileSystem] = { 'headers': FS_HEADER, 'column_skip_keys': FS_COLUMN_SKIP_KEYS, 'value_conv_enum': FS_VALUE_CONV_ENUM, 'value_conv_human': FS_VALUE_CONV_HUMAN, } # lsm.FsSnapshot FS_SNAP_HEADER = OrderedDict() 
FS_SNAP_HEADER['id'] = 'ID' FS_SNAP_HEADER['name'] = 'Name' FS_SNAP_HEADER['ts'] = 'Time Stamp' FS_SNAP_COLUMN_SKIP_KEYS = [] FS_SNAP_VALUE_CONV_ENUM = { 'ts': datetime.fromtimestamp } FS_SNAP_VALUE_CONV_HUMAN = [] VALUE_CONVERT[FsSnapshot] = { 'headers': FS_SNAP_HEADER, 'column_skip_keys': FS_SNAP_COLUMN_SKIP_KEYS, 'value_conv_enum': FS_SNAP_VALUE_CONV_ENUM, 'value_conv_human': FS_SNAP_VALUE_CONV_HUMAN, } # lsm.NfsExport NFS_EXPORT_HEADER = OrderedDict() NFS_EXPORT_HEADER['id'] = 'ID' NFS_EXPORT_HEADER['fs_id'] = 'FileSystem ID' NFS_EXPORT_HEADER['export_path'] = 'Export Path' NFS_EXPORT_HEADER['auth'] = 'Auth Type' NFS_EXPORT_HEADER['root'] = 'Root Hosts' NFS_EXPORT_HEADER['rw'] = 'RW Hosts' NFS_EXPORT_HEADER['ro'] = 'RO Hosts' NFS_EXPORT_HEADER['anonuid'] = 'Anonymous UID' NFS_EXPORT_HEADER['anongid'] = 'Anonymous GID' NFS_EXPORT_HEADER['options'] = 'Options' NFS_EXPORT_COLUMN_SKIP_KEYS = ['anonuid', 'anongid', 'auth'] NFS_EXPORT_VALUE_CONV_ENUM = {} NFS_EXPORT_VALUE_CONV_HUMAN = [] VALUE_CONVERT[NfsExport] = { 'headers': NFS_EXPORT_HEADER, 'column_skip_keys': NFS_EXPORT_COLUMN_SKIP_KEYS, 'value_conv_enum': NFS_EXPORT_VALUE_CONV_ENUM, 'value_conv_human': NFS_EXPORT_VALUE_CONV_HUMAN, } # lsm.TargetPort TGT_PORT_HEADER = OrderedDict() TGT_PORT_HEADER['id'] = 'ID' TGT_PORT_HEADER['port_type'] = 'Type' TGT_PORT_HEADER['physical_name'] = 'Physical Name' TGT_PORT_HEADER['service_address'] = 'Address' TGT_PORT_HEADER['network_address'] = 'Network Address' TGT_PORT_HEADER['physical_address'] = 'Physical Address' TGT_PORT_HEADER['system_id'] = 'System ID' TGT_PORT_COLUMN_SKIP_KEYS = ['physical_address', 'physical_name'] TGT_PORT_VALUE_CONV_ENUM = { 'port_type': tgt_port_type_to_str, } TGT_PORT_VALUE_CONV_HUMAN = [] VALUE_CONVERT[TargetPort] = { 'headers': TGT_PORT_HEADER, 'column_skip_keys': TGT_PORT_COLUMN_SKIP_KEYS, 'value_conv_enum': TGT_PORT_VALUE_CONV_ENUM, 'value_conv_human': TGT_PORT_VALUE_CONV_HUMAN, } VOL_RAID_INFO_HEADER = OrderedDict() 
VOL_RAID_INFO_HEADER['vol_id'] = 'Volume ID' VOL_RAID_INFO_HEADER['raid_type'] = 'RAID Type' VOL_RAID_INFO_HEADER['strip_size'] = 'Strip Size' VOL_RAID_INFO_HEADER['disk_count'] = 'Disk Count' VOL_RAID_INFO_HEADER['min_io_size'] = 'Minimum I/O Size' VOL_RAID_INFO_HEADER['opt_io_size'] = 'Optimal I/O Size' VOL_RAID_INFO_COLUMN_SKIP_KEYS = [] VOL_RAID_INFO_VALUE_CONV_ENUM = { 'raid_type': VolumeRAIDInfo.raid_type_to_str, } VOL_RAID_INFO_VALUE_CONV_HUMAN = [ 'strip_size', 'min_io_size', 'opt_io_size'] VALUE_CONVERT[VolumeRAIDInfo] = { 'headers': VOL_RAID_INFO_HEADER, 'column_skip_keys': VOL_RAID_INFO_COLUMN_SKIP_KEYS, 'value_conv_enum': VOL_RAID_INFO_VALUE_CONV_ENUM, 'value_conv_human': VOL_RAID_INFO_VALUE_CONV_HUMAN, } POOL_RAID_INFO_HEADER = OrderedDict() POOL_RAID_INFO_HEADER['pool_id'] = 'Pool ID' POOL_RAID_INFO_HEADER['raid_type'] = 'RAID Type' POOL_RAID_INFO_HEADER['member_type'] = 'Member Type' POOL_RAID_INFO_HEADER['member_ids'] = 'Member IDs' POOL_RAID_INFO_COLUMN_SKIP_KEYS = [] POOL_RAID_INFO_VALUE_CONV_ENUM = { 'raid_type': VolumeRAIDInfo.raid_type_to_str, 'member_type': PoolRAIDInfo.member_type_to_str, } POOL_RAID_INFO_VALUE_CONV_HUMAN = [] VALUE_CONVERT[PoolRAIDInfo] = { 'headers': POOL_RAID_INFO_HEADER, 'column_skip_keys': POOL_RAID_INFO_COLUMN_SKIP_KEYS, 'value_conv_enum': POOL_RAID_INFO_VALUE_CONV_ENUM, 'value_conv_human': POOL_RAID_INFO_VALUE_CONV_HUMAN, } VCR_CAP_HEADER = OrderedDict() VCR_CAP_HEADER['system_id'] = 'System ID' VCR_CAP_HEADER['raid_types'] = 'Supported RAID Types' VCR_CAP_HEADER['strip_sizes'] = 'Supported Strip Sizes' VCR_CAP_COLUMN_SKIP_KEYS = [] VCR_CAP_VALUE_CONV_ENUM = { 'raid_types': lambda i: [VolumeRAIDInfo.raid_type_to_str(x) for x in i] } VCR_CAP_VALUE_CONV_HUMAN = ['strip_sizes'] VALUE_CONVERT[VcrCap] = { 'headers': VCR_CAP_HEADER, 'column_skip_keys': VCR_CAP_COLUMN_SKIP_KEYS, 'value_conv_enum': VCR_CAP_VALUE_CONV_ENUM, 'value_conv_human': VCR_CAP_VALUE_CONV_HUMAN, } @staticmethod def _get_man_pro_value(obj, key, 
value_conv_enum, value_conv_human, flag_human, flag_enum): value = getattr(obj, key) if not flag_enum: if key in value_conv_enum.keys(): value = value_conv_enum[key](value) if flag_human: if key in value_conv_human: if type(value) is list: value = list(size_bytes_2_size_human(s) for s in value) else: value = size_bytes_2_size_human(value) return value @staticmethod def _find_max_width(two_d_list, column_index): max_width = 1 for row_index in range(0, len(two_d_list)): row_data = two_d_list[row_index] if len(row_data[column_index]) > max_width: max_width = len(row_data[column_index]) return max_width @staticmethod def _data_dict_gen(obj, flag_human, flag_enum, display_way, extra_properties=None, flag_dsp_all_data=False): data_dict = OrderedDict() value_convert = DisplayData.VALUE_CONVERT[type(obj)] headers = value_convert['headers'] value_conv_enum = value_convert['value_conv_enum'] value_conv_human = value_convert['value_conv_human'] if flag_dsp_all_data: display_way = DisplayData.DISPLAY_WAY_SCRIPT display_keys = [] if display_way == DisplayData.DISPLAY_WAY_COLUMN: for key_name in headers.keys(): if key_name not in value_convert['column_skip_keys']: display_keys.append(key_name) elif display_way == DisplayData.DISPLAY_WAY_SCRIPT: display_keys = headers.keys() if extra_properties: for extra_key_name in extra_properties: if extra_key_name not in display_keys: display_keys.append(extra_key_name) for key in display_keys: key_str = headers[key] value = DisplayData._get_man_pro_value( obj, key, value_conv_enum, value_conv_human, flag_human, flag_enum) data_dict[key_str] = value return data_dict @staticmethod def display_data(objs, display_way=None, flag_human=True, flag_enum=False, extra_properties=None, splitter=None, flag_with_header=True, flag_dsp_all_data=False): if len(objs) == 0: return None if display_way is None: display_way = DisplayData.DISPLAY_WAY_DEFAULT if splitter is None: splitter = DisplayData.DEFAULT_SPLITTER data_dict_list = [] if type(objs[0]) in 
DisplayData.VALUE_CONVERT.keys(): for obj in objs: data_dict = DisplayData._data_dict_gen( obj, flag_human, flag_enum, display_way, extra_properties, flag_dsp_all_data) data_dict_list.extend([data_dict]) else: return None if display_way == DisplayData.DISPLAY_WAY_SCRIPT: DisplayData.display_data_script_way(data_dict_list, splitter) elif display_way == DisplayData.DISPLAY_WAY_COLUMN: DisplayData._display_data_column_way( data_dict_list, splitter, flag_with_header) return True @staticmethod def display_data_script_way(data_dict_list, splitter): key_column_width = 1 value_column_width = 1 for data_dict in data_dict_list: for key_name in data_dict.keys(): # find the max column width of key cur_key_width = len(key_name) if cur_key_width > key_column_width: key_column_width = cur_key_width # find the max column width of value cur_value = data_dict[key_name] cur_value_width = 0 if isinstance(cur_value, list): if len(cur_value) == 0: continue cur_value_width = len(str(cur_value[0])) else: cur_value_width = len(str(cur_value)) if cur_value_width > value_column_width: value_column_width = cur_value_width row_format = '%%-%ds%s%%-%ds' % (key_column_width, splitter, value_column_width) sub_row_format = '%s%s%%-%ds' % (' ' * key_column_width, splitter, value_column_width) obj_splitter = '%s%s%s' % ('-' * key_column_width, '-' * len(splitter), '-' * value_column_width) for data_dict in data_dict_list: out(obj_splitter) for key_name in data_dict: value = data_dict[key_name] if isinstance(value, list): flag_first_data = True for sub_value in value: if flag_first_data: out(row_format % (key_name, str(sub_value))) flag_first_data = False else: out(sub_row_format % str(sub_value)) else: out(row_format % (key_name, str(value))) out(obj_splitter) @staticmethod def _display_data_column_way(data_dict_list, splitter, flag_with_header): if len(data_dict_list) == 0: return two_d_list = [] item_count = len(data_dict_list[0].keys()) # determine how many lines we will print row_width = 0 for 
data_dict in data_dict_list: cur_max_wd = 0 for key_name in data_dict.keys(): if isinstance(data_dict[key_name], list): cur_row_width = len(data_dict[key_name]) if cur_row_width > cur_max_wd: cur_max_wd = cur_row_width else: pass if cur_max_wd == 0: cur_max_wd = 1 row_width += cur_max_wd if flag_with_header: # first line for header row_width += 1 # init 2D list for raw in range(0, row_width): new = [] for column in range(0, item_count): new.append('') two_d_list.append(new) # header current_row_num = -1 if flag_with_header: two_d_list[0] = data_dict_list[0].keys() current_row_num = 0 # Fill the 2D list with data_dict_list for data_dict in data_dict_list: current_row_num += 1 save_row_num = current_row_num values = data_dict.values() for index in range(0, len(values)): value = values[index] if isinstance(value, list): for sub_index in range(0, len(value)): tmp_row_num = save_row_num + sub_index two_d_list[tmp_row_num][index] = str(value[sub_index]) if save_row_num + len(value) > current_row_num: current_row_num = save_row_num + len(value) - 1 else: two_d_list[save_row_num][index] = str(value) # display two_list row_formats = [] header_splitter = '' for column_index in range(0, len(two_d_list[0])): max_width = DisplayData._find_max_width(two_d_list, column_index) row_formats.extend(['%%-%ds' % max_width]) header_splitter += '-' * max_width if column_index != (len(two_d_list[0]) - 1): header_splitter += '-' * len(splitter) row_format = splitter.join(row_formats) for row_index in range(0, len(two_d_list)): out(row_format % tuple(two_d_list[row_index])) if row_index == 0 and flag_with_header: out(header_splitter) libstoragemgmt-1.2.3/autogen.sh0000775000175000017500000000022312537546123013466 00000000000000#!/bin/bash #Clean stuff up to ensure a clean autobuild rm -rf autom4te.cache/* rm -rf build-aux/* rm -f m4/l* rm -f aclocal.m4 autoreconf -f -i libstoragemgmt-1.2.3/COPYING.LIB0000664000175000017500000006364712537546165013156 00000000000000 GNU LESSER GENERAL PUBLIC 
LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. 
You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. ^L Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. 
The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. 
^L GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. 
You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. 
If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. ^L Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. 
You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. 
If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. ^L 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. 
(It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. ^L 7. 
You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. 
You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. ^L 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. 
If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. ^L 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS ^L How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! 
libstoragemgmt-1.2.3/config/0000775000175000017500000000000012542455463013017 500000000000000libstoragemgmt-1.2.3/config/lsmd.conf0000664000175000017500000000004412537546123014541 00000000000000allow-plugin-root-privilege = true; libstoragemgmt-1.2.3/config/pluginconf.d/0000775000175000017500000000000012542455463015405 500000000000000libstoragemgmt-1.2.3/config/pluginconf.d/sim.conf0000664000175000017500000000004012537546123016754 00000000000000require-root-privilege = false; libstoragemgmt-1.2.3/config/pluginconf.d/hpsa.conf0000664000175000017500000000003712537546123017125 00000000000000require-root-privilege = true; libstoragemgmt-1.2.3/config/pluginconf.d/megaraid.conf0000664000175000017500000000003712537546123017743 00000000000000require-root-privilege = true; libstoragemgmt-1.2.3/config/Makefile.am0000664000175000017500000000061012537546123014766 00000000000000lsmconfdir=$(sysconfdir)/lsm lsmconf_DATA=lsmd.conf EXTRA_DIST= lsmd.conf pluginconf.d/sim.conf pluginconfdir=$(sysconfdir)/lsm/pluginconf.d pluginconf_DATA=pluginconf.d/sim.conf if WITH_MEGARAID pluginconf_DATA += pluginconf.d/megaraid.conf EXTRA_DIST += pluginconf.d/megaraid.conf endif if WITH_HPSA pluginconf_DATA += pluginconf.d/hpsa.conf EXTRA_DIST += pluginconf.d/hpsa.conf endif libstoragemgmt-1.2.3/config/Makefile.in0000664000175000017500000004066512542455445015017 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. 
@SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @WITH_MEGARAID_TRUE@am__append_1 = pluginconf.d/megaraid.conf @WITH_MEGARAID_TRUE@am__append_2 = pluginconf.d/megaraid.conf 
@WITH_HPSA_TRUE@am__append_3 = pluginconf.d/hpsa.conf @WITH_HPSA_TRUE@am__append_4 = pluginconf.d/hpsa.conf subdir = config DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed 
'$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(lsmconfdir)" \ "$(DESTDIR)$(pluginconfdir)" DATA = $(lsmconf_DATA) $(pluginconf_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = 
@LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = 
@localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ lsmconfdir = $(sysconfdir)/lsm lsmconf_DATA = lsmd.conf EXTRA_DIST = lsmd.conf pluginconf.d/sim.conf $(am__append_2) \ $(am__append_4) pluginconfdir = $(sysconfdir)/lsm/pluginconf.d pluginconf_DATA = pluginconf.d/sim.conf $(am__append_1) \ $(am__append_3) all: all-am .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu config/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu config/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-lsmconfDATA: $(lsmconf_DATA) @$(NORMAL_INSTALL) @list='$(lsmconf_DATA)'; test -n "$(lsmconfdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(lsmconfdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(lsmconfdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(lsmconfdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(lsmconfdir)" || exit $$?; \ done uninstall-lsmconfDATA: @$(NORMAL_UNINSTALL) @list='$(lsmconf_DATA)'; test -n "$(lsmconfdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(lsmconfdir)'; $(am__uninstall_files_from_dir) install-pluginconfDATA: $(pluginconf_DATA) @$(NORMAL_INSTALL) @list='$(pluginconf_DATA)'; test -n "$(pluginconfdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(pluginconfdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(pluginconfdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pluginconfdir)'"; \ 
$(INSTALL_DATA) $$files "$(DESTDIR)$(pluginconfdir)" || exit $$?; \ done uninstall-pluginconfDATA: @$(NORMAL_UNINSTALL) @list='$(pluginconf_DATA)'; test -n "$(pluginconfdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(pluginconfdir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(lsmconfdir)" "$(DESTDIR)$(pluginconfdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-lsmconfDATA install-pluginconfDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-lsmconfDATA uninstall-pluginconfDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am \ install-lsmconfDATA install-man install-pdf install-pdf-am \ install-pluginconfDATA install-ps install-ps-am install-strip \ installcheck installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ uninstall-am uninstall-lsmconfDATA uninstall-pluginconfDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/configure0000775000175000017500000237067212542455446013423 00000000000000#! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.69 for libstoragemgmt 1.2.3. # # Report bugs to . 
# # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # # # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. 
if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. 
if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. 
fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO PATH=/empty FPATH=/empty; export PATH FPATH test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1 test \$(( 1 + 1 )) = 2 || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. 
as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org and $0: libstoragemgmt-devel@lists.fedorahosted.org about your $0: system, including any error possibly output before this $0: message. Then install a modern shell, or manually run $0: the script under such a shell if you do have one." 
fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. 
Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... 
but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" SHELL=${CONFIG_SHELL-/bin/sh} test -n "$DJDIR" || exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME='libstoragemgmt' PACKAGE_TARNAME='libstoragemgmt' PACKAGE_VERSION='1.2.3' PACKAGE_STRING='libstoragemgmt 1.2.3' PACKAGE_BUGREPORT='libstoragemgmt-devel@lists.fedorahosted.org' PACKAGE_URL='https://github.com/libstorage/libstoragemgmt/' ac_unique_file="configure.ac" # Factoring default headers for most tests. 
ac_includes_default="\ #include #ifdef HAVE_SYS_TYPES_H # include #endif #ifdef HAVE_SYS_STAT_H # include #endif #ifdef STDC_HEADERS # include # include #else # ifdef HAVE_STDLIB_H # include # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include # endif # include #endif #ifdef HAVE_STRINGS_H # include #endif #ifdef HAVE_INTTYPES_H # include #endif #ifdef HAVE_STDINT_H # include #endif #ifdef HAVE_UNISTD_H # include #endif" ac_subst_vars='am__EXEEXT_FALSE am__EXEEXT_TRUE LTLIBOBJS HAVE_SYSTEMD_FALSE HAVE_SYSTEMD_TRUE systemdsystemunitdir bashcompletiondir WITH_BASH_COMPLETION_FALSE WITH_BASH_COMPLETION_TRUE LIBCONFIG_LIBS LIBCONFIG_CFLAGS WITH_HPSA_FALSE WITH_HPSA_TRUE WITH_MEGARAID_FALSE WITH_MEGARAID_TRUE WITH_REST_API_FALSE WITH_REST_API_TRUE JSON_LIBS JSON_CFLAGS LIBMICROHTTPD_LIBS LIBMICROHTTPD_CFLAGS pkgpyexecdir pyexecdir pkgpythondir pythondir PYTHON_PLATFORM PYTHON_EXEC_PREFIX PYTHON_PREFIX PYTHON_VERSION PYTHON PERL BUILD_C_UNIT_FALSE BUILD_C_UNIT_TRUE LIBCHECK_LIBS LIBCHECK_CFLAGS LIBGLIB_LIBS LIBGLIB_CFLAGS LIBXML_LIBS LIBXML_CFLAGS YAJL_LIBS SSL_LIBS LIBOBJS CXXCPP OTOOL64 OTOOL LIPO NMEDIT DSYMUTIL MANIFEST_TOOL RANLIB ac_ct_AR AR DLLTOOL OBJDUMP NM ac_ct_DUMPBIN DUMPBIN LD FGREP EGREP GREP SED LIBTOOL CPP LN_S am__fastdepCXX_FALSE am__fastdepCXX_TRUE CXXDEPMODE ac_ct_CXX CXXFLAGS CXX am__fastdepCC_FALSE am__fastdepCC_TRUE CCDEPMODE am__nodep AMDEPBACKSLASH AMDEP_FALSE AMDEP_TRUE am__quote am__include DEPDIR OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC LIBSM_LIBTOOL_VERSION LIBSM_VERSION_NUMBER LIBSM_VERSION_INFO LIBSM_VERSION LIBSM_MICRO_VERSION LIBSM_MINOR_VERSION LIBSM_MAJOR_VERSION host_os host_vendor host_cpu host build_os build_vendor build_cpu build PKG_CONFIG_LIBDIR PKG_CONFIG_PATH PKG_CONFIG MAINT MAINTAINER_MODE_FALSE MAINTAINER_MODE_TRUE AM_BACKSLASH AM_DEFAULT_VERBOSITY AM_DEFAULT_V AM_V am__untar am__tar AMTAR am__leading_dot SET_MAKE AWK mkdir_p MKDIR_P INSTALL_STRIP_PROGRAM STRIP install_sh 
MAKEINFO AUTOHEADER AUTOMAKE AUTOCONF ACLOCAL VERSION PACKAGE CYGPATH_W am__isrc INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking enable_silent_rules enable_maintainer_mode enable_dependency_tracking enable_shared enable_static with_pic enable_fast_install with_gnu_ld with_sysroot enable_libtool_lock enable_build_c_unit with_rest_api with_megaraid with_hpsa with_bash_completion with_bash_completion_dir with_systemdsystemunitdir ' ac_precious_vars='build_alias host_alias target_alias PKG_CONFIG PKG_CONFIG_PATH PKG_CONFIG_LIBDIR CC CFLAGS LDFLAGS LIBS CPPFLAGS CXX CXXFLAGS CCC CPP CXXCPP LIBXML_CFLAGS LIBXML_LIBS LIBGLIB_CFLAGS LIBGLIB_LIBS LIBCHECK_CFLAGS LIBCHECK_LIBS PYTHON LIBMICROHTTPD_CFLAGS LIBMICROHTTPD_LIBS JSON_CFLAGS JSON_LIBS LIBCONFIG_CFLAGS LIBCONFIG_LIBS' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) 
bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | 
--datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. 
with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | 
--mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | 
--program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | 
--targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. 
with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? "missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. 
case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? 
"cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures libstoragemgmt 1.2.3 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' 
messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/libstoragemgmt] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF Program names: --program-prefix=PREFIX prepend PREFIX to installed program names --program-suffix=SUFFIX append SUFFIX to installed program names --program-transform-name=PROGRAM run sed PROGRAM 
on installed program names System types: --build=BUILD configure for building on BUILD [guessed] --host=HOST cross-compile to build programs to run on HOST [BUILD] _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of libstoragemgmt 1.2.3:";; esac cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-silent-rules less verbose build output (undo: "make V=1") --disable-silent-rules verbose build output (undo: "make V=0") --disable-maintainer-mode disable make rules and dependencies not useful (and sometimes confusing) to the casual installer --enable-dependency-tracking do not reject slow dependency extractors --disable-dependency-tracking speeds up one-time build --enable-shared[=PKGS] build shared libraries [default=yes] --enable-static[=PKGS] build static libraries [default=yes] --enable-fast-install[=PKGS] optimize for fast installation [default=yes] --disable-libtool-lock avoid locking (might break parallel builds) --disable-build-c-unit disable building C unit test case. Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-pic[=PKGS] try to use only PIC/non-PIC objects [default=use both] --with-gnu-ld assume the C compiler uses GNU ld [default=no] --with-sysroot=DIR Search for dependent libraries within DIR (or the compiler's sysroot if not specified). 
--without-rest-api Do not build the REST API daemon --without-megaraid Do not build the MegaRAID plugin --without-hpsa Do not build the HP SmartArray plugin --without-bash-completion Do not install the bash auto-completion script --with-bash-completion-dir=DIR Bash completions directory --with-systemdsystemunitdir=DIR Directory for systemd service files Some influential environment variables: PKG_CONFIG path to pkg-config utility PKG_CONFIG_PATH directories to add to pkg-config's search path PKG_CONFIG_LIBDIR path overriding pkg-config's built-in search path CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if you have headers in a nonstandard directory CXX C++ compiler command CXXFLAGS C++ compiler flags CPP C preprocessor CXXCPP C++ preprocessor LIBXML_CFLAGS C compiler flags for LIBXML, overriding pkg-config LIBXML_LIBS linker flags for LIBXML, overriding pkg-config LIBGLIB_CFLAGS C compiler flags for LIBGLIB, overriding pkg-config LIBGLIB_LIBS linker flags for LIBGLIB, overriding pkg-config LIBCHECK_CFLAGS C compiler flags for LIBCHECK, overriding pkg-config LIBCHECK_LIBS linker flags for LIBCHECK, overriding pkg-config PYTHON the Python interpreter LIBMICROHTTPD_CFLAGS C compiler flags for LIBMICROHTTPD, overriding pkg-config LIBMICROHTTPD_LIBS linker flags for LIBMICROHTTPD, overriding pkg-config JSON_CFLAGS C compiler flags for JSON, overriding pkg-config JSON_LIBS linker flags for JSON, overriding pkg-config LIBCONFIG_CFLAGS C compiler flags for LIBCONFIG, overriding pkg-config LIBCONFIG_LIBS linker flags for LIBCONFIG, overriding pkg-config Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to . libstoragemgmt home page: . _ACEOF ac_status=$? 
fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF libstoragemgmt configure 1.2.3 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. 
This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile # ac_fn_cxx_try_compile LINENO # ---------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_compile # ac_fn_c_try_cpp LINENO # ---------------------- # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_cpp # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? 
if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists and can be compiled using the include files in # INCLUDES, setting the cache variable VAR accordingly. ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile # ac_fn_c_try_run LINENO # ---------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. ac_fn_c_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_run # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... 
" >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func # ac_fn_cxx_try_cpp LINENO # ------------------------ # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? 
if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_cpp # ac_fn_cxx_try_link LINENO # ------------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. 
rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_link # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_c_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <$2> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? 
case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ( $as_echo "## ---------------------------------------------------------- ## ## Report this to libstoragemgmt-devel@lists.fedorahosted.org ## ## ---------------------------------------------------------- ##" ) | sed "s/^/$as_me: WARNING: /" >&2 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... 
" >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_mongrel # ac_fn_c_check_type LINENO TYPE VAR INCLUDES # ------------------------------------------- # Tests whether TYPE exists after having included INCLUDES, setting cache # variable VAR accordingly. ac_fn_c_check_type () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof ($2)) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 int main () { if (sizeof (($2))) return 0; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else eval "$3=yes" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_type # ac_fn_c_find_intX_t LINENO BITS VAR # ----------------------------------- # Finds a signed integer type with width BITS, setting cache variable VAR # accordingly. ac_fn_c_find_intX_t () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for int$2_t" >&5 $as_echo_n "checking for int$2_t... 
" >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" # Order is important - never check a type that is potentially smaller # than half of the expected target width. for ac_type in int$2_t 'int' 'long int' \ 'long long int' 'short int' 'signed char'; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default enum { N = $2 / 2 - 1 }; int main () { static int test_array [1 - 2 * !(0 < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1))]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default enum { N = $2 / 2 - 1 }; int main () { static int test_array [1 - 2 * !(($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1) < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 2))]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else case $ac_type in #( int$2_t) : eval "$3=yes" ;; #( *) : eval "$3=\$ac_type" ;; esac fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if eval test \"x\$"$3"\" = x"no"; then : else break fi done fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_find_intX_t # ac_fn_c_find_uintX_t LINENO BITS VAR # ------------------------------------ # Finds an unsigned integer type with width BITS, setting cache variable VAR # accordingly. ac_fn_c_find_uintX_t () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uint$2_t" >&5 $as_echo_n "checking for uint$2_t... 
" >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=no" # Order is important - never check a type that is potentially smaller # than half of the expected target width. for ac_type in uint$2_t 'unsigned int' 'unsigned long int' \ 'unsigned long long int' 'unsigned short int' 'unsigned char'; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $ac_includes_default int main () { static int test_array [1 - 2 * !((($ac_type) -1 >> ($2 / 2 - 1)) >> ($2 / 2 - 1) == 3)]; test_array [0] = 0; return test_array [0]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : case $ac_type in #( uint$2_t) : eval "$3=yes" ;; #( *) : eval "$3=\$ac_type" ;; esac fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if eval test \"x\$"$3"\" = x"no"; then : else break fi done fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_find_uintX_t cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by libstoragemgmt $as_me 1.2.3, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. 
## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. 
ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo $as_echo "## ---------------- ## ## Cache variables. 
## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. 
## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . 
"$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. 
ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. 
## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_aux_dir= for ac_dir in build-aux "$srcdir"/build-aux; do if test -f "$ac_dir/install-sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f "$ac_dir/install.sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f "$ac_dir/shtool"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then as_fn_error $? "cannot find install-sh, install.sh, or shtool in build-aux \"$srcdir\"/build-aux" "$LINENO" 5 fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. ac_config_headers="$ac_config_headers config.h" am__api_version='1.13' # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. 
# Reject install programs that cannot install multiple files. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if ${ac_cv_path_install+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in #(( ./ | .// | /[cC]/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. : else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. 
Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 $as_echo_n "checking whether build environment is sane... " >&6; } # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[\\\"\#\$\&\'\`$am_lf]*) as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; esac case $srcdir in *[\\\"\#\$\&\'\`$am_lf\ \ ]*) as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;; esac # Do 'set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( am_has_slept=no for am_try in 1 2; do echo "timestamp, slept: $am_has_slept" > conftest.file set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$*" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi if test "$*" != "X $srcdir/configure conftest.file" \ && test "$*" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. 
Such a system could not be considered "sane". as_fn_error $? "ls -t appears to fail. Make sure there is not a broken alias in your environment" "$LINENO" 5 fi if test "$2" = conftest.file || test $am_try -eq 2; then break fi # Just in case. sleep 1 am_has_slept=yes done test "$2" = conftest.file ) then # Ok. : else as_fn_error $? "newly created file is older than distributed files! Check your system clock" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } # If we didn't sleep, we still need to ensure time stamps of config.status and # generated files are strictly newer. am_sleep_pid= if grep 'slept: no' conftest.file >/dev/null 2>&1; then ( sleep 1 ) & am_sleep_pid=$! fi rm -f conftest.file test "$program_prefix" != NONE && program_transform_name="s&^&$program_prefix&;$program_transform_name" # Use a double $ so make ignores it. test "$program_suffix" != NONE && program_transform_name="s&\$&$program_suffix&;$program_transform_name" # Double any \ or $. # By default was `s,x,x', remove it if useless. ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` # expand $ac_aux_dir to an absolute path am_aux_dir=`cd $ac_aux_dir && pwd` if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then am_missing_run="$MISSING " else am_missing_run= { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 $as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;} fi if test x"${install_sh}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi # Installed binaries are usually stripped using 'strip' when the user # run "make install-strip". 
However 'strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the 'STRIP' environment variable to overrule this program. if test "$cross_compiling" != no; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 $as_echo_n "checking for a thread-safe mkdir -p... " >&6; } if test -z "$MKDIR_P"; then if ${ac_cv_path_mkdir+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in mkdir gmkdir; do for ac_exec_ext in '' $ac_executable_extensions; do as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( 'mkdir (GNU coreutils) '* | \ 'mkdir (coreutils) '* | \ 'mkdir (fileutils) '4.1*) ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext break 3;; esac done done done IFS=$as_save_IFS fi test -d ./--version && rmdir ./--version if test "${ac_cv_path_mkdir+set}" = set; then MKDIR_P="$ac_cv_path_mkdir -p" else # As a last resort, use the slow shell script. 
Don't cache a # value for MKDIR_P within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. MKDIR_P="$ac_install_sh -d" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 $as_echo "$MKDIR_P" >&6; } for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AWK+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. 
case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } SET_MAKE= else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null # Check whether --enable-silent-rules was given. if test "${enable_silent_rules+set}" = set; then : enableval=$enable_silent_rules; fi case $enable_silent_rules in # ((( yes) AM_DEFAULT_VERBOSITY=0;; no) AM_DEFAULT_VERBOSITY=1;; *) AM_DEFAULT_VERBOSITY=1;; esac am_make=${MAKE-make} { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 $as_echo_n "checking whether $am_make supports nested variables... " >&6; } if ${am_cv_make_support_nested_variables+:} false; then : $as_echo_n "(cached) " >&6 else if $as_echo 'TRUE=$(BAR$(V)) BAR0=false BAR1=true V=1 am__doit: @$(TRUE) .PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then am_cv_make_support_nested_variables=yes else am_cv_make_support_nested_variables=no fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 $as_echo "$am_cv_make_support_nested_variables" >&6; } if test $am_cv_make_support_nested_variables = yes; then AM_V='$(V)' AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' else AM_V=$AM_DEFAULT_VERBOSITY AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY fi AM_BACKSLASH='\' if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." am__isrc=' -I$(srcdir)' # test to see if srcdir already configured if test -f $srcdir/config.status; then as_fn_error $? 
"source directory already configured; run \"make distclean\" there first" "$LINENO" 5 fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi # Define the identity of the package. PACKAGE='libstoragemgmt' VERSION='1.2.3' cat >>confdefs.h <<_ACEOF #define PACKAGE "$PACKAGE" _ACEOF cat >>confdefs.h <<_ACEOF #define VERSION "$VERSION" _ACEOF # Some tools Automake needs. ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: # # mkdir_p='$(MKDIR_P)' # We need awk for the "check" target. The system "awk" is bad on # some platforms. # Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... AMTAR='$${TAR-tar}' # We'll loop over all known methods to create a tar archive until one works. _am_tools='gnutar pax cpio none' am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5 $as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; } # Check whether --enable-maintainer-mode was given. 
if test "${enable_maintainer_mode+set}" = set; then : enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval else USE_MAINTAINER_MODE=yes fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5 $as_echo "$USE_MAINTAINER_MODE" >&6; } if test $USE_MAINTAINER_MODE = yes; then MAINTAINER_MODE_TRUE= MAINTAINER_MODE_FALSE='#' else MAINTAINER_MODE_TRUE='#' MAINTAINER_MODE_FALSE= fi MAINT=$MAINTAINER_MODE_TRUE # Enable silent build when available (Automake 1.11) # Check whether --enable-silent-rules was given. if test "${enable_silent_rules+set}" = set; then : enableval=$enable_silent_rules; fi case $enable_silent_rules in # ((( yes) AM_DEFAULT_VERBOSITY=0;; no) AM_DEFAULT_VERBOSITY=1;; *) AM_DEFAULT_VERBOSITY=0;; esac am_make=${MAKE-make} { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 $as_echo_n "checking whether $am_make supports nested variables... " >&6; } if ${am_cv_make_support_nested_variables+:} false; then : $as_echo_n "(cached) " >&6 else if $as_echo 'TRUE=$(BAR$(V)) BAR0=false BAR1=true V=1 am__doit: @$(TRUE) .PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then am_cv_make_support_nested_variables=yes else am_cv_make_support_nested_variables=no fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 $as_echo "$am_cv_make_support_nested_variables" >&6; } if test $am_cv_make_support_nested_variables = yes; then AM_V='$(V)' AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' else AM_V=$AM_DEFAULT_VERBOSITY AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY fi AM_BACKSLASH='\' if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_path_PKG_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PKG_CONFIG=$ac_cv_path_PKG_CONFIG if test -n "$PKG_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5 $as_echo "$PKG_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_PKG_CONFIG"; then ac_pt_PKG_CONFIG=$PKG_CONFIG # Extract the first word of "pkg-config", so it can be a program name with args. set dummy pkg-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_ac_pt_PKG_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $ac_pt_PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG if test -n "$ac_pt_PKG_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_PKG_CONFIG" >&5 $as_echo "$ac_pt_PKG_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_PKG_CONFIG" = x; then PKG_CONFIG="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac PKG_CONFIG=$ac_pt_PKG_CONFIG fi else PKG_CONFIG="$ac_cv_path_PKG_CONFIG" fi fi if test -n "$PKG_CONFIG"; then _pkg_min_version=0.9.0 { $as_echo "$as_me:${as_lineno-$LINENO}: checking pkg-config is at least version $_pkg_min_version" >&5 $as_echo_n "checking pkg-config is at least version $_pkg_min_version... " >&6; } if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PKG_CONFIG="" fi fi # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 $as_echo_n "checking build system type... " >&6; } if ${ac_cv_build+:} false; then : $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && as_fn_error $? 
"cannot guess build type; you must specify one" "$LINENO" 5 ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 $as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; *) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; esac build=$ac_cv_build ac_save_IFS=$IFS; IFS='-' set x $ac_cv_build shift build_cpu=$1 build_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: build_os=$* IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 $as_echo_n "checking host system type... " >&6; } if ${ac_cv_host+:} false; then : $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 $as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; *) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; esac host=$ac_cv_host ac_save_IFS=$IFS; IFS='-' set x $ac_cv_host shift host_cpu=$1 host_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: host_os=$* IFS=$ac_save_IFS case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac LIBSM_MAJOR_VERSION=`echo $VERSION | awk -F. '{print $1}'` LIBSM_MINOR_VERSION=`echo $VERSION | awk -F. '{print $2}'` LIBSM_MICRO_VERSION=`echo $VERSION | awk -F. 
'{print $3}'` LIBSM_VERSION=$LIBSM_MAJOR_VERSION.$LIBSM_MINOR_VERSION.$LIBSM_MICRO_VERSION$LIBSM_MICRO_VERSION_SUFFIX LIBSM_VERSION_INFO=`expr $LIBSM_MAJOR_VERSION + $LIBSM_MINOR_VERSION`:$LIBSM_MICRO_VERSION:$LIBSM_MINOR_VERSION LIBSM_VERSION_NUMBER=`expr $LIBSM_MAJOR_VERSION \* 1000000 + $LIBSM_MINOR_VERSION \* 1000 + $LIBSM_MICRO_VERSION` # Our intention is that we will always be backward compatible. Thus we will # set the library version in such a way so that we will always be # libstoragemgmt.so.1.n.n once we officially release our ver 1.0.0. # # To make this happen we will use the minor version as the libtool current and # age set to minor - 1 and the micro used for revision. Basically this will get # us what we expect while utilizing the libtool revision system. # # For this to work we need to make sure that when we add to the interface we # increment minor and set micro to 0. If we make a code change which doesn't # change the API we can just bump micro. # # 0.1.0 -> libstoragemgmt.so.0.1.0 # 1.0.0 -> libstoragemgmt.so.1.0.0 # 1.1.0 -> libstoragemgmt.so.1.1.0 # 1.1.1 -> libstoragemgmt.so.1.1.1 CURRENT=`expr $LIBSM_MAJOR_VERSION '*' 1 + $LIBSM_MINOR_VERSION` AGE=$LIBSM_MINOR_VERSION REVISION=$LIBSM_MICRO_VERSION LIBSM_LIBTOOL_VERSION=$CURRENT:$REVISION:$AGE LIBXML_REQUIRED="2.5.0" ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. 
set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. 
break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. 
For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. 
The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu DEPDIR="${am__leading_dot}deps" ac_config_commands="$ac_config_commands depfiles" am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just 
comment out the code. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5 $as_echo_n "checking for style of include used by $am_make... " >&6; } am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from 'make'. case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5 $as_echo "$_am_result" >&6; } rm -f confinc confmf # Check whether --enable-dependency-tracking was given. if test "${enable_dependency_tracking+set}" = set; then : enableval=$enable_dependency_tracking; fi if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi if test "x$enable_dependency_tracking" != xno; then AMDEP_TRUE= AMDEP_FALSE='#' else AMDEP_TRUE='#' AMDEP_FALSE= fi depcc="$CC" am_compiler_list= { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if ${am_cv_CC_dependencies_compiler_type+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. 
cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CC_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. 
if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CC_dependencies_compiler_type=$depmode break fi fi done cd .. 
rm -rf conftest.dir else am_cv_CC_dependencies_compiler_type=none fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 $as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then am__fastdepCC_TRUE= am__fastdepCC_FALSE='#' else am__fastdepCC_TRUE='#' am__fastdepCC_FALSE= fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test -z "$CXX"; then if test -n "$CCC"; then CXX=$CCC else if test -n "$ac_tool_prefix"; then for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CXX"; then ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CXX=$ac_cv_prog_CXX if test -n "$CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 $as_echo "$CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CXX" && break done fi if test -z "$CXX"; then ac_ct_CXX=$CXX for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CXX"; then ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CXX="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CXX=$ac_cv_prog_ac_ct_CXX if test -n "$ac_ct_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 $as_echo "$ac_ct_CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CXX" && break done if test "x$ac_ct_CXX" = x; then CXX="g++" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CXX=$ac_ct_CXX fi fi fi fi # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 $as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } if ${ac_cv_cxx_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 $as_echo "$ac_cv_cxx_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GXX=yes else GXX= fi ac_test_CXXFLAGS=${CXXFLAGS+set} ac_save_CXXFLAGS=$CXXFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 $as_echo_n "checking whether $CXX accepts -g... " >&6; } if ${ac_cv_prog_cxx_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_cxx_werror_flag=$ac_cxx_werror_flag ac_cxx_werror_flag=yes ac_cv_prog_cxx_g=no CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes else CXXFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : else ac_cxx_werror_flag=$ac_save_cxx_werror_flag CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cxx_werror_flag=$ac_save_cxx_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 $as_echo "$ac_cv_prog_cxx_g" >&6; } if test "$ac_test_CXXFLAGS" = set; then CXXFLAGS=$ac_save_CXXFLAGS elif test $ac_cv_prog_cxx_g = yes; then if test "$GXX" = yes; then CXXFLAGS="-g -O2" else CXXFLAGS="-g" fi else if test "$GXX" = yes; then CXXFLAGS="-O2" else CXXFLAGS= fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CXX" am_compiler_list= { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if ${am_cv_CXX_dependencies_compiler_type+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. 
For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CXX_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. 
These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CXX_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_CXX_dependencies_compiler_type=none fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 $as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; } CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then am__fastdepCXX_TRUE= am__fastdepCXX_FALSE='#' else am__fastdepCXX_TRUE='#' am__fastdepCXX_FALSE= fi for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_prog_AWK+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 $as_echo_n "checking whether ln -s works... " >&6; } LN_S=$as_ln_s if test "$LN_S" = "ln -s"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 $as_echo "no, using $LN_S" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. 
case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } SET_MAKE= else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... " >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? 
"C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. 
shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. 
*/ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CC" am_compiler_list= { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if ${am_cv_CC_dependencies_compiler_type+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". 
rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CC_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. 
test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CC_dependencies_compiler_type=$depmode break fi fi done cd .. 
rm -rf conftest.dir else am_cv_CC_dependencies_compiler_type=none fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 $as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then am__fastdepCC_TRUE= am__fastdepCC_FALSE='#' else am__fastdepCC_TRUE='#' am__fastdepCC_FALSE= fi am_cv_prog_cc_stdc=$ac_cv_prog_cc_stdc case `pwd` in *\ * | *\ *) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 $as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; esac macro_version='2.4.2' macro_revision='1.3337' ltmain="$ac_aux_dir/ltmain.sh" # Backslashify metacharacters that are still active within # double-quoted strings. sed_quote_subst='s/\(["`$\\]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\(["`\\]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' # Sed substitution to delay expansion of an escaped single quote. delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 $as_echo_n "checking how to print strings... " >&6; } # Test print first, because it will be a builtin if present. 
if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='print -r --' elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='printf %s\n' else # Use this function as a fallback that always works. func_fallback_echo () { eval 'cat <<_LTECHO_EOF $1 _LTECHO_EOF' } ECHO='func_fallback_echo' fi # func_echo_all arg... # Invoke $ECHO with all args, space-separated. func_echo_all () { $ECHO "" } case "$ECHO" in printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 $as_echo "printf" >&6; } ;; print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 $as_echo "print -r" >&6; } ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 $as_echo "cat" >&6; } ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 $as_echo_n "checking for a sed that does not truncate output... " >&6; } if ${ac_cv_path_SED+:} false; then : $as_echo_n "(cached) " >&6 else ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ for ac_i in 1 2 3 4 5 6 7; do ac_script="$ac_script$as_nl$ac_script" done echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed { ac_script=; unset ac_script;} if test -z "$SED"; then ac_path_SED_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_SED" || continue # Check for GNU ac_path_SED and select it if it is found. 
# Check for GNU $ac_path_SED case `"$ac_path_SED" --version 2>&1` in *GNU*) ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo '' >> "conftest.nl" "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_SED_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_SED="$ac_path_SED" ac_path_SED_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_SED_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_SED"; then as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 fi else ac_cv_path_SED=$SED fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 $as_echo "$ac_cv_path_SED" >&6; } SED="$ac_cv_path_SED" rm -f conftest.sed test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if ${ac_cv_path_GREP+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_GREP" || continue # Check for GNU ac_path_GREP and select it if it is found. 
# Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_EGREP" || continue # Check for GNU ac_path_EGREP and select it if it is found. 
# Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 $as_echo_n "checking for fgrep... " >&6; } if ${ac_cv_path_FGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 then ac_cv_path_FGREP="$GREP -F" else if test -z "$FGREP"; then ac_path_FGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in fgrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_FGREP" || continue # Check for GNU ac_path_FGREP and select it if it is found. 
# Check for GNU $ac_path_FGREP case `"$ac_path_FGREP" --version 2>&1` in *GNU*) ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'FGREP' >> "conftest.nl" "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_FGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_FGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_FGREP"; then as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_FGREP=$FGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 $as_echo "$ac_cv_path_FGREP" >&6; } FGREP="$ac_cv_path_FGREP" test -z "$GREP" && GREP=grep # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. 
[\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if ${lt_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if ${lt_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld { $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 $as_echo_n "checking for BSD- or MS-compatible name lister (nm)... 
" >&6; } if ${lt_cv_path_NM+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM="$NM" else lt_nm_to_check="${ac_tool_prefix}nm" if test -n "$ac_tool_prefix" && test "$build" = "$host"; then lt_nm_to_check="$lt_nm_to_check nm" fi for lt_tmp_nm in $lt_nm_to_check; do lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. tmp_nm="$ac_dir/$lt_tmp_nm" if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then # Check to see if the nm accepts a BSD-compat flag. # Adding the `sed 1q' prevents false positives on HP-UX, which says: # nm: unknown option "B" ignored # Tru64's nm complains that /dev/null is an invalid object file case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in */dev/null* | *'Invalid file or object type'*) lt_cv_path_NM="$tmp_nm -B" break ;; *) case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in */dev/null*) lt_cv_path_NM="$tmp_nm -p" break ;; *) lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but continue # so that we can try to find one that supports BSD flags ;; esac ;; esac fi done IFS="$lt_save_ifs" done : ${lt_cv_path_NM=no} fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 $as_echo "$lt_cv_path_NM" >&6; } if test "$lt_cv_path_NM" != "no"; then NM="$lt_cv_path_NM" else # Didn't find any BSD compatible name lister, look for dumpbin. if test -n "$DUMPBIN"; then : # Let the user override the test. else if test -n "$ac_tool_prefix"; then for ac_prog in dumpbin "link -dump" do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_prog_DUMPBIN+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DUMPBIN"; then ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DUMPBIN=$ac_cv_prog_DUMPBIN if test -n "$DUMPBIN"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 $as_echo "$DUMPBIN" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$DUMPBIN" && break done fi if test -z "$DUMPBIN"; then ac_ct_DUMPBIN=$DUMPBIN for ac_prog in dumpbin "link -dump" do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DUMPBIN"; then ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN if test -n "$ac_ct_DUMPBIN"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 $as_echo "$ac_ct_DUMPBIN" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_DUMPBIN" && break done if test "x$ac_ct_DUMPBIN" = x; then DUMPBIN=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DUMPBIN=$ac_ct_DUMPBIN fi fi case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in *COFF*) DUMPBIN="$DUMPBIN -symbols" ;; *) DUMPBIN=: ;; esac fi if test "$DUMPBIN" != ":"; then NM="$DUMPBIN" fi fi test -z "$NM" && NM=nm { $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 $as_echo_n "checking the name lister ($NM) interface... 
" >&6; } if ${lt_cv_nm_interface+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&5 (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&5 (eval echo "\"\$as_me:$LINENO: output\"" >&5) cat conftest.out >&5 if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 $as_echo "$lt_cv_nm_interface" >&6; } # find the maximum length of command line arguments { $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 $as_echo_n "checking the maximum length of command line arguments... " >&6; } if ${lt_cv_sys_max_cmd_len+:} false; then : $as_echo_n "(cached) " >&6 else i=0 teststring="ABCD" case $build_os in msdosdjgpp*) # On DJGPP, this test can blow up pretty badly due to problems in libc # (any single argument exceeding 2000 bytes causes a buffer overrun # during glob expansion). Even if it were fixed, the result of this # check would be larger than it should be. lt_cv_sys_max_cmd_len=12288; # 12K is about right ;; gnu*) # Under GNU Hurd, this test is not required because there is # no limit to the length of command line arguments. # Libtool will interpret -1 as no limit whatsoever lt_cv_sys_max_cmd_len=-1; ;; cygwin* | mingw* | cegcc*) # On Win9x/ME, this test blows up -- it succeeds, but takes # about 5 minutes as the teststring grows exponentially. # Worse, since 9x/ME are not pre-emptively multitasking, # you end up with a "frozen" computer, even though with patience # the test eventually succeeds (with a max line length of 256k). 
# Instead, let's just punt: use the minimum linelength reported by # all of the supported platforms: 8192 (on NT/2K/XP). lt_cv_sys_max_cmd_len=8192; ;; mint*) # On MiNT this can take a long time and run out of memory. lt_cv_sys_max_cmd_len=8192; ;; amigaos*) # On AmigaOS with pdksh, this test takes hours, literally. # So we just punt and use a minimum line length of 8192. lt_cv_sys_max_cmd_len=8192; ;; netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) # This has been around since 386BSD, at least. Likely further. if test -x /sbin/sysctl; then lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` elif test -x /usr/sbin/sysctl; then lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` else lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs fi # And add a safety zone lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ;; interix*) # We know the value 262144 and hardcode it with a safety zone (like BSD) lt_cv_sys_max_cmd_len=196608 ;; os2*) # The test takes a long time on OS/2. lt_cv_sys_max_cmd_len=8192 ;; osf*) # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not # nice to cause kernel panics so lets avoid the loop below. # First set a reasonable default. 
lt_cv_sys_max_cmd_len=16384 # if test -x /sbin/sysconfig; then case `/sbin/sysconfig -q proc exec_disable_arg_limit` in *1*) lt_cv_sys_max_cmd_len=-1 ;; esac fi ;; sco3.2v5*) lt_cv_sys_max_cmd_len=102400 ;; sysv5* | sco5v6* | sysv4.2uw2*) kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` if test -n "$kargmax"; then lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` else lt_cv_sys_max_cmd_len=32768 fi ;; *) lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` if test -n "$lt_cv_sys_max_cmd_len"; then lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` else # Make teststring a little bigger before we do anything with it. # a 1K string should be a reasonable start. for i in 1 2 3 4 5 6 7 8 ; do teststring=$teststring$teststring done SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \ = "X$teststring$teststring"; } >/dev/null 2>&1 && test $i != 17 # 1/2 MB should be enough do i=`expr $i + 1` teststring=$teststring$teststring done # Only check the string length outside the loop. lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` teststring= # Add a significant safety factor because C++ compilers can tack on # massive amounts of additional arguments before passing them to the # linker. It appears as though 1/2 is a usable value. 
lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` fi ;; esac fi if test -n $lt_cv_sys_max_cmd_len ; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 $as_echo "$lt_cv_sys_max_cmd_len" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 $as_echo "none" >&6; } fi max_cmd_len=$lt_cv_sys_max_cmd_len : ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands some XSI constructs" >&5 $as_echo_n "checking whether the shell understands some XSI constructs... " >&6; } # Try some XSI features xsi_shell=no ( _lt_dummy="a/b/c" test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ = c,a/b,b/c, \ && eval 'test $(( 1 + 1 )) -eq 2 \ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ && xsi_shell=yes { $as_echo "$as_me:${as_lineno-$LINENO}: result: $xsi_shell" >&5 $as_echo "$xsi_shell" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands \"+=\"" >&5 $as_echo_n "checking whether the shell understands \"+=\"... " >&6; } lt_shell_append=no ( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \ >/dev/null 2>&1 \ && lt_shell_append=yes { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_shell_append" >&5 $as_echo "$lt_shell_append" >&6; } if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false fi # test EBCDIC or ASCII case `echo X|tr X '\101'` in A) # ASCII based system # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr lt_SP2NL='tr \040 \012' lt_NL2SP='tr \015\012 \040\040' ;; *) # EBCDIC based system lt_SP2NL='tr \100 \n' lt_NL2SP='tr \r\n \100\100' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 $as_echo_n "checking how to convert $build file names to $host format... 
" >&6; } if ${lt_cv_to_host_file_cmd+:} false; then : $as_echo_n "(cached) " >&6 else case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ;; esac ;; *-*-cygwin* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_noop ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ;; esac ;; * ) # unhandled hosts (and "normal" native builds) lt_cv_to_host_file_cmd=func_convert_file_noop ;; esac fi to_host_file_cmd=$lt_cv_to_host_file_cmd { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 $as_echo "$lt_cv_to_host_file_cmd" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 $as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } if ${lt_cv_to_tool_file_cmd+:} false; then : $as_echo_n "(cached) " >&6 else #assume ordinary cross tools, or native build. lt_cv_to_tool_file_cmd=func_convert_file_noop case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ;; esac ;; esac fi to_tool_file_cmd=$lt_cv_to_tool_file_cmd { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 $as_echo "$lt_cv_to_tool_file_cmd" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 $as_echo_n "checking for $LD option to reload object files... 
" >&6; } if ${lt_cv_ld_reload_flag+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_reload_flag='-r' fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 $as_echo "$lt_cv_ld_reload_flag" >&6; } reload_flag=$lt_cv_ld_reload_flag case $reload_flag in "" | " "*) ;; *) reload_flag=" $reload_flag" ;; esac reload_cmds='$LD$reload_flag -o $output$reload_objs' case $host_os in cygwin* | mingw* | pw32* | cegcc*) if test "$GCC" != yes; then reload_cmds=false fi ;; darwin*) if test "$GCC" = yes; then reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' else reload_cmds='$LD$reload_flag -o $output$reload_objs' fi ;; esac if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. set dummy ${ac_tool_prefix}objdump; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OBJDUMP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OBJDUMP"; then ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OBJDUMP=$ac_cv_prog_OBJDUMP if test -n "$OBJDUMP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 $as_echo "$OBJDUMP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OBJDUMP"; then ac_ct_OBJDUMP=$OBJDUMP # Extract the first word of "objdump", so it can be a program name with args. 
set dummy objdump; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OBJDUMP"; then ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OBJDUMP="objdump" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP if test -n "$ac_ct_OBJDUMP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 $as_echo "$ac_ct_OBJDUMP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OBJDUMP" = x; then OBJDUMP="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OBJDUMP=$ac_ct_OBJDUMP fi else OBJDUMP="$ac_cv_prog_OBJDUMP" fi test -z "$OBJDUMP" && OBJDUMP=objdump { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 $as_echo_n "checking how to recognize dependent libraries... " >&6; } if ${lt_cv_deplibs_check_method+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_file_magic_cmd='$MAGIC_CMD' lt_cv_file_magic_test_file= lt_cv_deplibs_check_method='unknown' # Need to set the preceding variable on all platforms that support # interlibrary dependencies. # 'none' -- dependencies not supported. # `unknown' -- same as none, but documents that we really don't know. 
# 'pass_all' -- all dependencies passed with no checks. # 'test_compile' -- check by making test program. # 'file_magic [[regex]]' -- check by looking for files in library path # which responds to the $file_magic_cmd with a given extended regex. # If you have `file' or equivalent on your system and you're not sure # whether `pass_all' will *always* work, you probably want this one. case $host_os in aix[4-9]*) lt_cv_deplibs_check_method=pass_all ;; beos*) lt_cv_deplibs_check_method=pass_all ;; bsdi[45]*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' lt_cv_file_magic_cmd='/usr/bin/file -L' lt_cv_file_magic_test_file=/shlib/libc.so ;; cygwin*) # func_win32_libid is a shell function defined in ltmain.sh lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' ;; mingw* | pw32*) # Base MSYS/MinGW do not provide the 'file' command needed by # func_win32_libid shell function, so use a weaker test based on 'objdump', # unless we find 'file', for example because we are cross-compiling. # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin. if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' else # Keep this pattern in sync with the one in func_win32_libid. lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' lt_cv_file_magic_cmd='$OBJDUMP -f' fi ;; cegcc*) # use the weaker test based on 'objdump'. See mingw*. lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) lt_cv_deplibs_check_method=pass_all ;; freebsd* | dragonfly*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. 
# Let's accept both of them until this is cleared up. lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ;; esac else lt_cv_deplibs_check_method=pass_all fi ;; gnu*) lt_cv_deplibs_check_method=pass_all ;; haiku*) lt_cv_deplibs_check_method=pass_all ;; hpux10.20* | hpux11*) lt_cv_file_magic_cmd=/usr/bin/file case $host_cpu in ia64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ;; hppa*64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac ;; interix[3-9]*) # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' ;; irix5* | irix6* | nonstopux*) case $LD in *-32|*"-32 ") libmagic=32-bit;; *-n32|*"-n32 ") libmagic=N32;; *-64|*"-64 ") libmagic=64-bit;; *) libmagic=never-match;; esac lt_cv_deplibs_check_method=pass_all ;; # This must be glibc/ELF. 
linux* | k*bsd*-gnu | kopensolaris*-gnu) lt_cv_deplibs_check_method=pass_all ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' fi ;; newos6*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; *nto* | *qnx*) lt_cv_deplibs_check_method=pass_all ;; openbsd*) if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) lt_cv_deplibs_check_method=pass_all ;; rdos*) lt_cv_deplibs_check_method=pass_all ;; solaris*) lt_cv_deplibs_check_method=pass_all ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) lt_cv_deplibs_check_method=pass_all ;; sysv4 | sysv4.3*) case $host_vendor in motorola) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ;; ncr) lt_cv_deplibs_check_method=pass_all ;; sequent) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' ;; sni) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" lt_cv_file_magic_test_file=/lib/libc.so ;; siemens) lt_cv_deplibs_check_method=pass_all ;; pc) lt_cv_deplibs_check_method=pass_all ;; esac ;; tpf*) lt_cv_deplibs_check_method=pass_all ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 $as_echo "$lt_cv_deplibs_check_method" >&6; } file_magic_glob= 
want_nocaseglob=no if test "$build" = "$host"; then case $host_os in mingw* | pw32*) if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then want_nocaseglob=yes else file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` fi ;; esac fi file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. set dummy ${ac_tool_prefix}dlltool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_DLLTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DLLTOOL"; then ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DLLTOOL=$ac_cv_prog_DLLTOOL if test -n "$DLLTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 $as_echo "$DLLTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DLLTOOL"; then ac_ct_DLLTOOL=$DLLTOOL # Extract the first word of "dlltool", so it can be a program name with args. set dummy dlltool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DLLTOOL"; then ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DLLTOOL="dlltool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL if test -n "$ac_ct_DLLTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 $as_echo "$ac_ct_DLLTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DLLTOOL" = x; then DLLTOOL="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DLLTOOL=$ac_ct_DLLTOOL fi else DLLTOOL="$ac_cv_prog_DLLTOOL" fi test -z "$DLLTOOL" && DLLTOOL=dlltool { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 $as_echo_n "checking how to associate runtime and link libraries... 
" >&6; } if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_sharedlib_from_linklib_cmd='unknown' case $host_os in cygwin* | mingw* | pw32* | cegcc*) # two different shell functions defined in ltmain.sh # decide which to use based on capabilities of $DLLTOOL case `$DLLTOOL --help 2>&1` in *--identify-strict*) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ;; *) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ;; esac ;; *) # fallback: assume linklib IS sharedlib lt_cv_sharedlib_from_linklib_cmd="$ECHO" ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 $as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO if test -n "$ac_tool_prefix"; then for ac_prog in ar do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AR"; then ac_cv_prog_AR="$AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AR="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AR=$ac_cv_prog_AR if test -n "$AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 $as_echo "$AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AR" && break done fi if test -z "$AR"; then ac_ct_AR=$AR for ac_prog in ar do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_AR"; then ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_AR="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_AR=$ac_cv_prog_ac_ct_AR if test -n "$ac_ct_AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 $as_echo "$ac_ct_AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_AR" && break done if test "x$ac_ct_AR" = x; then AR="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac AR=$ac_ct_AR fi fi : ${AR=ar} : ${AR_FLAGS=cru} { $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 $as_echo_n "checking for archiver @FILE support... " >&6; } if ${lt_cv_ar_at_file+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ar_at_file=no cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : echo conftest.$ac_objext > conftest.lst lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 (eval $lt_ar_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if test "$ac_status" -eq 0; then # Ensure the archiver fails upon bogus file names. rm -f conftest.$ac_objext libconftest.a { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 (eval $lt_ar_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } if test "$ac_status" -ne 0; then lt_cv_ar_at_file=@ fi fi rm -f conftest.* libconftest.a fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 $as_echo "$lt_cv_ar_at_file" >&6; } if test "x$lt_cv_ar_at_file" = xno; then archiver_list_spec= else archiver_list_spec=$lt_cv_ar_at_file fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi test -z "$STRIP" && STRIP=: if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 $as_echo "$RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 $as_echo "$ac_ct_RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_RANLIB" = x; then RANLIB=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac RANLIB=$ac_ct_RANLIB fi else RANLIB="$ac_cv_prog_RANLIB" fi test -z "$RANLIB" && RANLIB=: # Determine commands to create old-style static archives. old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' old_postinstall_cmds='chmod 644 $oldlib' old_postuninstall_cmds= if test -n "$RANLIB"; then case $host_os in openbsd*) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" ;; *) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" ;; esac old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" fi case $host_os in darwin*) lock_old_archive_extraction=yes ;; *) lock_old_archive_extraction=no ;; esac # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Check for command to grab the raw symbol name followed by C symbol from nm. { $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 $as_echo_n "checking command to parse $NM output from $compiler object... 
" >&6; } if ${lt_cv_sys_global_symbol_pipe+:} false; then : $as_echo_n "(cached) " >&6 else # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. symcode='[BCDEGRST]' # Regexp to match symbols that can be accessed directly from C. sympat='\([_A-Za-z][_A-Za-z0-9]*\)' # Define system-specific variables. case $host_os in aix*) symcode='[BCDT]' ;; cygwin* | mingw* | pw32* | cegcc*) symcode='[ABCDGISTW]' ;; hpux*) if test "$host_cpu" = ia64; then symcode='[ABCDEGRST]' fi ;; irix* | nonstopux*) symcode='[BCDEGRST]' ;; osf*) symcode='[BCDEGQRST]' ;; solaris*) symcode='[BDRT]' ;; sco3.2v5*) symcode='[DT]' ;; sysv4.2uw2*) symcode='[DT]' ;; sysv5* | sco5v6* | unixware* | OpenUNIX*) symcode='[ABDT]' ;; sysv4) symcode='[DFNSTU]' ;; esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) symcode='[ABCDGIRSTW]' ;; esac # Transform an extracted symbol line into a proper C declaration. # Some systems (esp. on ia64) link data and code symbols differently, # so use this general approach. lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" # Transform an extracted symbol line into symbol name and symbol address lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac # Try without a prefix underscore, then with it. 
for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Fake it for dumpbin and say T for any non-static function # and D for any global variable. # Also find C++ and __fastcall symbols from MSVC++, # which start with @ or ?. lt_cv_sys_global_symbol_pipe="$AWK '"\ " {last_section=section; section=\$ 3};"\ " /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ " \$ 0!~/External *\|/{next};"\ " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ " {if(hide[section]) next};"\ " {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\ " {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ " s[1]~/^[@?]/{print s[1], s[1]; next};"\ " s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ " ' prfx=^$ac_symprfx" else lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" fi lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" # Check to see that the pipe works correctly. pipe_works=no rm -f conftest* cat > conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; void nm_test_func(void); void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then # Now try to grab the symbols. nlist=conftest.nm if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi # Make sure that we snagged all the symbols we need. if $GREP ' nm_test_var$' "$nlist" >/dev/null; then if $GREP ' nm_test_func$' "$nlist" >/dev/null; then cat <<_LT_EOF > conftest.$ac_ext /* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ #if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) /* DATA imports from DLLs on WIN32 con't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. */ # define LT_DLSYM_CONST #elif defined(__osf__) /* This system does not cope well with relocations in const data. */ # define LT_DLSYM_CONST #else # define LT_DLSYM_CONST const #endif #ifdef __cplusplus extern "C" { #endif _LT_EOF # Now generate the symbol file. eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' cat <<_LT_EOF >> conftest.$ac_ext /* The mapping between symbol names and symbols. */ LT_DLSYM_CONST struct { const char *name; void *address; } lt__PROGRAM__LTX_preloaded_symbols[] = { { "@PROGRAM@", (void *) 0 }, _LT_EOF $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext cat <<\_LT_EOF >> conftest.$ac_ext {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt__PROGRAM__LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif _LT_EOF # Now try linking the two files. mv conftest.$ac_objext conftstm.$ac_objext lt_globsym_save_LIBS=$LIBS lt_globsym_save_CFLAGS=$CFLAGS LIBS="conftstm.$ac_objext" CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } && test -s conftest${ac_exeext}; then pipe_works=yes fi LIBS=$lt_globsym_save_LIBS CFLAGS=$lt_globsym_save_CFLAGS else echo "cannot find nm_test_func in $nlist" >&5 fi else echo "cannot find nm_test_var in $nlist" >&5 fi else echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 fi else echo "$progname: failed program was:" >&5 cat conftest.$ac_ext >&5 fi rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test "$pipe_works" = yes; then break else lt_cv_sys_global_symbol_pipe= fi done fi if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 $as_echo "failed" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 $as_echo "ok" >&6; } fi # Response file support. if test "$lt_cv_nm_interface" = "MS dumpbin"; then nm_file_list_spec='@' elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then nm_file_list_spec='@' fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 $as_echo_n "checking for sysroot... " >&6; } # Check whether --with-sysroot was given. if test "${with_sysroot+set}" = set; then : withval=$with_sysroot; else with_sysroot=no fi lt_sysroot= case ${with_sysroot} in #( yes) if test "$GCC" = yes; then lt_sysroot=`$CC --print-sysroot 2>/dev/null` fi ;; #( /*) lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` ;; #( no|'') ;; #( *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_sysroot}" >&5 $as_echo "${with_sysroot}" >&6; } as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 $as_echo "${lt_sysroot:-no}" >&6; } # Check whether --enable-libtool-lock was given. 
if test "${enable_libtool_lock+set}" = set; then : enableval=$enable_libtool_lock; fi test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE="32" ;; *ELF-64*) HPUX_IA64_MODE="64" ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out which ABI we are using. echo '#line '$LINENO' "configure"' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then if test "$lt_cv_prog_gnu_ld" = yes; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" ;; *64-bit*) LD="${LD-ld} -melf64bmip" ;; esac else case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -32" ;; *N32*) LD="${LD-ld} -n32" ;; *64-bit*) LD="${LD-ld} -64" ;; esac fi fi rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_i386" ;; ppc64-*linux*|powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) LD="${LD-ld} -m elf_s390" ;; sparc64-*linux*) LD="${LD-ld} -m elf32_sparc" ;; esac ;; *64-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_x86_64_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; ppc*-*linux*|powerpc*-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) LD="${LD-ld} -m elf64_s390" ;; sparc*-*linux*) LD="${LD-ld} -m elf64_sparc" ;; esac ;; esac fi rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -belf" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 $as_echo_n "checking whether the C compiler needs -belf... " >&6; } if ${lt_cv_cc_needs_belf+:} false; then : $as_echo_n "(cached) " >&6 else ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_cc_needs_belf=yes else lt_cv_cc_needs_belf=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 $as_echo "$lt_cv_cc_needs_belf" >&6; } if test x"$lt_cv_cc_needs_belf" != x"yes"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS="$SAVE_CFLAGS" fi ;; *-*solaris*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) case $host in i?86-*-solaris*) LD="${LD-ld} -m elf_x86_64" ;; sparc*-*-solaris*) LD="${LD-ld} -m elf64_sparc" ;; esac # GNU ld 2.21 introduced _sol2 emulations. Use them if available. if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then LD="${LD-ld}_sol2" fi ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" fi ;; esac ;; esac fi rm -rf conftest* ;; esac need_locks="$enable_libtool_lock" if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. set dummy ${ac_tool_prefix}mt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$MANIFEST_TOOL"; then ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL if test -n "$MANIFEST_TOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 $as_echo "$MANIFEST_TOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL # Extract the first word of "mt", so it can be a program name with args. set dummy mt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_MANIFEST_TOOL"; then ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL if test -n "$ac_ct_MANIFEST_TOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 $as_echo "$ac_ct_MANIFEST_TOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_MANIFEST_TOOL" = x; then MANIFEST_TOOL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL fi else MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" fi test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 $as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } if ${lt_cv_path_mainfest_tool+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_path_mainfest_tool=no echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out cat conftest.err >&5 if $GREP 'Manifest Tool' conftest.out > /dev/null; then lt_cv_path_mainfest_tool=yes fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 $as_echo "$lt_cv_path_mainfest_tool" >&6; } if test "x$lt_cv_path_mainfest_tool" != xyes; then MANIFEST_TOOL=: fi case $host_os in rhapsody* | darwin*) if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. 
set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_DSYMUTIL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DSYMUTIL"; then ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DSYMUTIL=$ac_cv_prog_DSYMUTIL if test -n "$DSYMUTIL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 $as_echo "$DSYMUTIL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DSYMUTIL"; then ac_ct_DSYMUTIL=$DSYMUTIL # Extract the first word of "dsymutil", so it can be a program name with args. set dummy dsymutil; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DSYMUTIL"; then ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL if test -n "$ac_ct_DSYMUTIL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 $as_echo "$ac_ct_DSYMUTIL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DSYMUTIL" = x; then DSYMUTIL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DSYMUTIL=$ac_ct_DSYMUTIL fi else DSYMUTIL="$ac_cv_prog_DSYMUTIL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. set dummy ${ac_tool_prefix}nmedit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_NMEDIT+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$NMEDIT"; then ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi NMEDIT=$ac_cv_prog_NMEDIT if test -n "$NMEDIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 $as_echo "$NMEDIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_NMEDIT"; then ac_ct_NMEDIT=$NMEDIT # Extract the first word of "nmedit", so it can be a program name with args. set dummy nmedit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_NMEDIT"; then ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_NMEDIT="nmedit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT if test -n "$ac_ct_NMEDIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 $as_echo "$ac_ct_NMEDIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_NMEDIT" = x; then NMEDIT=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac NMEDIT=$ac_ct_NMEDIT fi else NMEDIT="$ac_cv_prog_NMEDIT" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. set dummy ${ac_tool_prefix}lipo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_LIPO+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$LIPO"; then ac_cv_prog_LIPO="$LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_LIPO="${ac_tool_prefix}lipo" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi LIPO=$ac_cv_prog_LIPO if test -n "$LIPO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 $as_echo "$LIPO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_LIPO"; then ac_ct_LIPO=$LIPO # Extract the first word of "lipo", so it can be a program name with args. set dummy lipo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_LIPO+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_LIPO"; then ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_LIPO="lipo" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO if test -n "$ac_ct_LIPO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 $as_echo "$ac_ct_LIPO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_LIPO" = x; then LIPO=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac LIPO=$ac_ct_LIPO fi else LIPO="$ac_cv_prog_LIPO" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. set dummy ${ac_tool_prefix}otool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OTOOL"; then ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OTOOL="${ac_tool_prefix}otool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL=$ac_cv_prog_OTOOL if test -n "$OTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 $as_echo "$OTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL"; then ac_ct_OTOOL=$OTOOL # Extract the first word of "otool", so it can be a program name with args. set dummy otool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL"; then ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OTOOL="otool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL if test -n "$ac_ct_OTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 $as_echo "$ac_ct_OTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL" = x; then OTOOL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL=$ac_ct_OTOOL fi else OTOOL="$ac_cv_prog_OTOOL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. set dummy ${ac_tool_prefix}otool64; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OTOOL64+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OTOOL64"; then ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL64=$ac_cv_prog_OTOOL64 if test -n "$OTOOL64"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 $as_echo "$OTOOL64" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL64"; then ac_ct_OTOOL64=$OTOOL64 # Extract the first word of "otool64", so it can be a program name with args. set dummy otool64; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL64"; then ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OTOOL64="otool64" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 if test -n "$ac_ct_OTOOL64"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 $as_echo "$ac_ct_OTOOL64" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL64" = x; then OTOOL64=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL64=$ac_ct_OTOOL64 fi else OTOOL64="$ac_cv_prog_OTOOL64" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 $as_echo_n "checking for -single_module linker flag... " >&6; } if ${lt_cv_apple_cc_single_mod+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_apple_cc_single_mod=no if test -z "${LT_MULTI_MODULE}"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the # link flags. rm -rf libconftest.dylib* echo "int foo(void){return 1;}" > conftest.c echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c" >&5 $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err _lt_result=$? # If there is a non-empty error log, and "single_module" # appears in it, assume the flag caused a linker warning if test -s conftest.err && $GREP single_module conftest.err; then cat conftest.err >&5 # Otherwise, if the output was created with a 0 exit code from # the compiler, it worked. 
elif test -f libconftest.dylib && test $_lt_result -eq 0; then lt_cv_apple_cc_single_mod=yes else cat conftest.err >&5 fi rm -rf libconftest.dylib* rm -f conftest.* fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 $as_echo "$lt_cv_apple_cc_single_mod" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 $as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } if ${lt_cv_ld_exported_symbols_list+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_ld_exported_symbols_list=yes else lt_cv_ld_exported_symbols_list=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 $as_echo "$lt_cv_ld_exported_symbols_list" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 $as_echo_n "checking for -force_load linker flag... " >&6; } if ${lt_cv_ld_force_load+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_force_load=no cat > conftest.c << _LT_EOF int forced_loaded() { return 2;} _LT_EOF echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 echo "$AR cru libconftest.a conftest.o" >&5 $AR cru libconftest.a conftest.o 2>&5 echo "$RANLIB libconftest.a" >&5 $RANLIB libconftest.a 2>&5 cat > conftest.c << _LT_EOF int main() { return 0;} _LT_EOF echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err _lt_result=$? 
if test -s conftest.err && $GREP force_load conftest.err; then cat conftest.err >&5 elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then lt_cv_ld_force_load=yes else cat conftest.err >&5 fi rm -f conftest.err libconftest.a conftest conftest.c rm -rf conftest.dSYM fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 $as_echo "$lt_cv_ld_force_load" >&6; } case $host_os in rhapsody* | darwin1.[012]) _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on # if running on 10.5 or later, the deployment target defaults # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in 10.0,*86*-darwin8*|10.0,*-darwin[91]*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; 10.[012]*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; 10.*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; esac ;; esac if test "$lt_cv_apple_cc_single_mod" = "yes"; then _lt_dar_single_mod='$single_module' fi if test "$lt_cv_ld_exported_symbols_list" = "yes"; then _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' else _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' fi if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then _lt_dsymutil='~$DSYMUTIL $lib || :' else _lt_dsymutil= fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? 
((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in dlfcn.h do : ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default " if test "x$ac_cv_header_dlfcn_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_DLFCN_H 1 _ACEOF fi done func_stripname_cnf () { case ${2} in .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; esac } # func_stripname_cnf # Set options enable_dlopen=no enable_win32_dll=no # Check whether --enable-shared was given. if test "${enable_shared+set}" = set; then : enableval=$enable_shared; p=${PACKAGE-default} case $enableval in yes) enable_shared=yes ;; no) enable_shared=no ;; *) enable_shared=no # Look at the argument we got. We use all the common list separators. 
lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_shared=yes fi done IFS="$lt_save_ifs" ;; esac else enable_shared=yes fi # Check whether --enable-static was given. if test "${enable_static+set}" = set; then : enableval=$enable_static; p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_static=yes fi done IFS="$lt_save_ifs" ;; esac else enable_static=yes fi # Check whether --with-pic was given. if test "${with_pic+set}" = set; then : withval=$with_pic; lt_p=${PACKAGE-default} case $withval in yes|no) pic_mode=$withval ;; *) pic_mode=default # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for lt_pkg in $withval; do IFS="$lt_save_ifs" if test "X$lt_pkg" = "X$lt_p"; then pic_mode=yes fi done IFS="$lt_save_ifs" ;; esac else pic_mode=default fi test -z "$pic_mode" && pic_mode=default # Check whether --enable-fast-install was given. if test "${enable_fast_install+set}" = set; then : enableval=$enable_fast_install; p=${PACKAGE-default} case $enableval in yes) enable_fast_install=yes ;; no) enable_fast_install=no ;; *) enable_fast_install=no # Look at the argument we got. We use all the common list separators. lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_fast_install=yes fi done IFS="$lt_save_ifs" ;; esac else enable_fast_install=yes fi # This can be used to rebuild libtool when needed LIBTOOL_DEPS="$ltmain" # Always use our own libtool. 
LIBTOOL='$(SHELL) $(top_builddir)/libtool' test -z "$LN_S" && LN_S="ln -s" if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 $as_echo_n "checking for objdir... " >&6; } if ${lt_cv_objdir+:} false; then : $as_echo_n "(cached) " >&6 else rm -f .libs 2>/dev/null mkdir .libs 2>/dev/null if test -d .libs; then lt_cv_objdir=.libs else # MS-DOS does not allow filenames that begin with a dot. lt_cv_objdir=_libs fi rmdir .libs 2>/dev/null fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 $as_echo "$lt_cv_objdir" >&6; } objdir=$lt_cv_objdir cat >>confdefs.h <<_ACEOF #define LT_OBJDIR "$lt_cv_objdir/" _ACEOF case $host_os in aix3*) # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi ;; esac # Global variables: ofile=libtool can_build_shared=yes # All known linkers require a `.a' archive for static linking (except MSVC, # which needs '.lib'). libext=a with_gnu_ld="$lt_cv_prog_gnu_ld" old_CC="$CC" old_CFLAGS="$CFLAGS" # Set sane defaults for various variables test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS test -z "$LD" && LD=ld test -z "$ac_objext" && ac_objext=o for cc_temp in $compiler""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 $as_echo_n "checking for ${ac_tool_prefix}file... 
" >&6; } if ${lt_cv_path_MAGIC_CMD+:} false; then : $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD="$MAGIC_CMD" lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f $ac_dir/${ac_tool_prefix}file; then lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS="$lt_save_ifs" MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac fi MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 $as_echo_n "checking for file... 
" >&6; } if ${lt_cv_path_MAGIC_CMD+:} false; then : $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD="$MAGIC_CMD" lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f $ac_dir/file; then lt_cv_path_MAGIC_CMD="$ac_dir/file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. 
Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS="$lt_save_ifs" MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac fi MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else MAGIC_CMD=: fi fi fi ;; esac # Use C for the default configuration in the libtool script lt_save_CC="$CC" ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # Source file extension for C test sources. ac_ext=c # Object file extension for compiled C test sources. objext=o objext=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(){return(0);}' # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Save the default compiler, since it gets overwritten when the other # tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. 
compiler_DEFAULT=$CC # save warnings/boilerplate of simple test code ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... if test -n "$compiler"; then lt_prog_compiler_no_builtin_flag= if test "$GCC" = yes; then case $cc_basename in nvcc*) lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; *) lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 $as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_rtti_exceptions=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-fno-rtti -fno-exceptions" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? 
cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_rtti_exceptions=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 $as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" else : fi fi lt_prog_compiler_wl= lt_prog_compiler_pic= lt_prog_compiler_static= if test "$GCC" = yes; then lt_prog_compiler_wl='-Wl,' lt_prog_compiler_static='-static' case $host_os in aix*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support lt_prog_compiler_pic='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). 
# Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries lt_prog_compiler_pic='-DDLL_EXPORT' ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic='-fno-common' ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. lt_prog_compiler_static= ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) # +Z the default ;; *) lt_prog_compiler_pic='-fPIC' ;; esac ;; interix[3-9]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; msdosdjgpp*) # Just because we use GCC doesn't mean we suddenly get shared libraries # on systems that don't support them. lt_prog_compiler_can_build_shared=no enable_shared=no ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic='-fPIC -shared' ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic=-Kconform_pic fi ;; *) lt_prog_compiler_pic='-fPIC' ;; esac case $cc_basename in nvcc*) # Cuda Compiler Driver 2.2 lt_prog_compiler_wl='-Xlinker ' if test -n "$lt_prog_compiler_pic"; then lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" fi ;; esac else # PORTME Check for flag to pass linker flags through the system compiler. case $host_os in aix*) lt_prog_compiler_wl='-Wl,' if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' else lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' fi ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). 
lt_prog_compiler_pic='-DDLL_EXPORT' ;; hpux9* | hpux10* | hpux11*) lt_prog_compiler_wl='-Wl,' # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but # not for PA HP-UX. case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) lt_prog_compiler_pic='+Z' ;; esac # Is there a better lt_prog_compiler_static that works with the bundled CC? lt_prog_compiler_static='${wl}-a ${wl}archive' ;; irix5* | irix6* | nonstopux*) lt_prog_compiler_wl='-Wl,' # PIC (with -KPIC) is the default. lt_prog_compiler_static='-non_shared' ;; linux* | k*bsd*-gnu | kopensolaris*-gnu) case $cc_basename in # old Intel for x86_64 which still supported -KPIC. ecc*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-static' ;; # icc used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. icc* | ifort*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; # Lahey Fortran 8.1. lf95*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='--shared' lt_prog_compiler_static='--static' ;; nagfor*) # NAG Fortran compiler lt_prog_compiler_wl='-Wl,-Wl,,' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group compilers (*not* the Pentium gcc compiler, # which looks to be a dead project) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fpic' lt_prog_compiler_static='-Bstatic' ;; ccc*) lt_prog_compiler_wl='-Wl,' # All Alpha code is PIC. 
lt_prog_compiler_static='-non_shared' ;; xl* | bgxl* | bgf* | mpixl*) # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-qpic' lt_prog_compiler_static='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) # Sun Fortran 8.3 passes all unrecognized flags to the linker lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='' ;; *Sun\ F* | *Sun*Fortran*) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='-Qoption ld ' ;; *Sun\ C*) # Sun C 5.9 lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='-Wl,' ;; *Intel*\ [CF]*Compiler*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; *Portland\ Group*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fpic' lt_prog_compiler_static='-Bstatic' ;; esac ;; esac ;; newsos6) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic='-fPIC -shared' ;; osf3* | osf4* | osf5*) lt_prog_compiler_wl='-Wl,' # All OSF/1 code is PIC. 
lt_prog_compiler_static='-non_shared' ;; rdos*) lt_prog_compiler_static='-non_shared' ;; solaris*) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' case $cc_basename in f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) lt_prog_compiler_wl='-Qoption ld ';; *) lt_prog_compiler_wl='-Wl,';; esac ;; sunos4*) lt_prog_compiler_wl='-Qoption ld ' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; sysv4 | sysv4.2uw2* | sysv4.3*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec ;then lt_prog_compiler_pic='-Kconform_pic' lt_prog_compiler_static='-Bstatic' fi ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; unicos*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_can_build_shared=no ;; uts4*) lt_prog_compiler_pic='-pic' lt_prog_compiler_static='-Bstatic' ;; *) lt_prog_compiler_can_build_shared=no ;; esac fi case $host_os in # For platforms which do not support PIC, -DPIC is meaningless: *djgpp*) lt_prog_compiler_pic= ;; *) lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } if ${lt_cv_prog_compiler_pic+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic=$lt_prog_compiler_pic fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 $as_echo "$lt_cv_prog_compiler_pic" >&6; } lt_prog_compiler_pic=$lt_cv_prog_compiler_pic # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
" >&6; } if ${lt_cv_prog_compiler_pic_works+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic -DPIC" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 $as_echo "$lt_cv_prog_compiler_pic_works" >&6; } if test x"$lt_cv_prog_compiler_pic_works" = xyes; then case $lt_prog_compiler_pic in "" | " "*) ;; *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; esac else lt_prog_compiler_pic= lt_prog_compiler_can_build_shared=no fi fi # # Check to make sure the static flag actually works. 
# wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if ${lt_cv_prog_compiler_static_works+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_static_works=yes fi else lt_cv_prog_compiler_static_works=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 $as_echo "$lt_cv_prog_compiler_static_works" >&6; } if test x"$lt_cv_prog_compiler_static_works" = xyes; then : else lt_prog_compiler_static= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. 
# Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. 
lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } hard_links="nottested" if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... 
" >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test "$hard_links" = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } runpath_var= allow_undefined_flag= always_export_symbols=no archive_cmds= archive_expsym_cmds= compiler_needs_object=no enable_shared_with_static_runtimes=no export_dynamic_flag_spec= export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' hardcode_automatic=no hardcode_direct=no hardcode_direct_absolute=no hardcode_libdir_flag_spec= hardcode_libdir_separator= hardcode_minus_L=no hardcode_shlibpath_var=unsupported inherit_rpath=no link_all_deplibs=unknown module_cmds= module_expsym_cmds= old_archive_from_new_cmds= old_archive_from_expsyms_cmds= thread_safe_flag_spec= whole_archive_flag_spec= # include_expsyms should be a list of space-separated symbols to be *always* # included in the symbol list include_expsyms= # exclude_expsyms can be an extended regexp of symbols to exclude # it will be wrapped by ` (' and `)$', so one must not match beginning or # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', # as well as any symbol that contains `d'. 
exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. # Exclude shared library initialization/finalization symbols. extract_expsyms_cmds= case $host_os in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test "$GCC" != yes; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd*) with_gnu_ld=no ;; esac ld_shlibs=yes # On some targets, GNU ld is compatible enough with the native linker # that we're better off using the native interface for both. lt_use_gnu_ld_interface=no if test "$with_gnu_ld" = yes; then case $host_os in aix*) # The AIX port of GNU ld has always aspired to compatibility # with the native linker. However, as the warning in the GNU ld # block says, versions before 2.19.5* couldn't really create working # shared libraries, regardless of the interface used. case `$LD -v 2>&1` in *\ \(GNU\ Binutils\)\ 2.19.5*) ;; *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; *\ \(GNU\ Binutils\)\ [3-9]*) ;; *) lt_use_gnu_ld_interface=yes ;; esac ;; *) lt_use_gnu_ld_interface=yes ;; esac fi if test "$lt_use_gnu_ld_interface" = yes; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='${wl}' # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' export_dynamic_flag_spec='${wl}--export-dynamic' # ancient GNU ld didn't support --whole-archive et. al. 
if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else whole_archive_flag_spec= fi supports_anon_versioning=no case `$LD -v 2>&1` in *GNU\ gold*) supports_anon_versioning=yes ;; *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... *\ 2.11.*) ;; # other 2.11 versions *) supports_anon_versioning=yes ;; esac # See if GNU ld supports shared libraries. case $host_os in aix[3-9]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.19, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to install binutils *** 2.20 or above, or modify your PATH so that a non-GNU linker is found. *** You will then need to restart the configuration process. 
_LT_EOF fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then allow_undefined_flag=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else ld_shlibs=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, # as there is no search path for DLLs. 
hardcode_libdir_flag_spec='-L$libdir' export_dynamic_flag_spec='${wl}--export-all-symbols' allow_undefined_flag=unsupported always_export_symbols=no enable_shared_with_static_runtimes=yes export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else ld_shlibs=no fi ;; haiku*) archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' link_all_deplibs=yes ;; interix[3-9]*) hardcode_direct=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='${wl}-rpath,$libdir' export_dynamic_flag_spec='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) tmp_diet=no if test "$host_os" = linux-dietlibc; then case $cc_basename in diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) esac fi if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ && test "$tmp_diet" = no then tmp_addflag=' $pic_flag' tmp_sharedflag='-shared' case $cc_basename,$host_cpu in pgcc*) # Portland Group C compiler whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag' ;; pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group f77 and f90 compilers whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag -Mnomain' ;; ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 tmp_addflag=' -i_dynamic' ;; efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 tmp_addflag=' -i_dynamic -nofor_main' ;; ifc* | ifort*) # Intel Fortran compiler tmp_addflag=' -nofor_main' ;; lf95*) # Lahey Fortran 8.1 whole_archive_flag_spec= tmp_sharedflag='--shared' ;; xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) tmp_sharedflag='-qmkshrobj' tmp_addflag= ;; nvcc*) # Cuda Compiler Driver 2.2 whole_archive_flag_spec='${wl}--whole-archive`for conv 
in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' compiler_needs_object=yes ;; esac case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' compiler_needs_object=yes tmp_sharedflag='-G' ;; *Sun\ F*) # Sun Fortran 8.3 tmp_sharedflag='-G' ;; esac archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi case $cc_basename in xlf* | bgf* | bgxlf* | mpixlf*) # IBM XL Fortran 10.1 on PPC cannot create shared libs itself whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' if test "x$supports_anon_versioning" = xyes; then archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' fi ;; esac else ld_shlibs=no fi ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' wlarc= else archive_cmds='$CC 
-shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris*) if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not *** reliably create shared libraries on SCO systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.16.91.0.3 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF ;; *) # For security reasons, it is highly recommended that you always # use absolute paths for naming shared libraries, and exclude the # DT_RUNPATH tag from executables and libraries. But doing so # requires that you compile everything twice, which is a pain. 
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac ;; sunos4*) archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= hardcode_direct=yes hardcode_shlibpath_var=no ;; *) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac if test "$ld_shlibs" = no; then runpath_var= hardcode_libdir_flag_spec= export_dynamic_flag_spec= whole_archive_flag_spec= fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) allow_undefined_flag=unsupported always_export_symbols=yes archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. hardcode_minus_L=yes if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. hardcode_direct=unsupported fi ;; aix[4-9]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else # If we're using GNU nm, then we don't want the "-C" option. 
# -C means demangle to AIX nm, but means don't demangle with GNU nm # Also, AIX nm treats weak defined symbols like other global # defined symbols, whereas GNU nm marks them as "W". if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes break fi done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
archive_cmds='' hardcode_direct=yes hardcode_direct_absolute=yes hardcode_libdir_separator=':' link_all_deplibs=yes file_list_spec='${wl}-f,' if test "$GCC" = yes; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L=yes hardcode_libdir_flag_spec='-L$libdir' hardcode_libdir_separator= fi ;; esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi export_dynamic_flag_spec='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to export. always_export_symbols=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. allow_undefined_flag='-berok' # Determine the default libpath from the value encoded in an # empty executable. if test "${lt_cv_aix_libpath+set}" = set; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath_+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_="/usr/lib:/lib" fi fi aix_libpath=$lt_cv_aix_libpath_ fi hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' allow_undefined_flag="-z nodefs" archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. if test "${lt_cv_aix_libpath+set}" = set; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath_+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. 
if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_="/usr/lib:/lib" fi fi aix_libpath=$lt_cv_aix_libpath_ fi hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag=' ${wl}-bernotok' allow_undefined_flag=' ${wl}-berok' if test "$with_gnu_ld" = yes; then # We only use this code for GNU lds that support --whole-archive. whole_archive_flag_spec='${wl}--whole-archive$convenience ${wl}--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec='$convenience' fi archive_cmds_need_lc=yes # This is similar to how AIX traditionally builds its shared libraries. archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; bsdi[45]*) export_dynamic_flag_spec=-rdynamic ;; cygwin* | mingw* | pw32* | cegcc*) # 
When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. case $cc_basename in cl*) # Native MSVC hardcode_libdir_flag_spec=' ' allow_undefined_flag=unsupported always_export_symbols=yes file_list_spec='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; else sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. 
# _LT_TAGVAR(old_archive_from_new_cmds, )='true' enable_shared_with_static_runtimes=yes exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' # Don't use ranlib old_postinstall_cmds='chmod 644 $oldlib' postlink_cmds='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile="$lt_outputfile.exe" lt_tool_outputfile="$lt_tool_outputfile.exe" ;; esac~ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # Assume MSVC wrapper hardcode_libdir_flag_spec=' ' allow_undefined_flag=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. old_archive_from_new_cmds='true' # FIXME: Should let the user specify the lib program. 
old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' enable_shared_with_static_runtimes=yes ;; esac ;; darwin* | rhapsody*) archive_cmds_need_lc=no hardcode_direct=no hardcode_automatic=yes hardcode_shlibpath_var=unsupported if test "$lt_cv_ld_force_load" = "yes"; then whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' else whole_archive_flag_spec='' fi link_all_deplibs=yes allow_undefined_flag="$_lt_dar_allow_undefined" case $cc_basename in ifort*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test "$_lt_dar_can_shared" = "yes"; then output_verbose_link_cmd=func_echo_all archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" else ld_shlibs=no fi ;; dgux*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). 
freebsd2.2*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2.*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. freebsd* | dragonfly*) archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; hpux9*) if test "$GCC" = yes; then archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' fi hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes export_dynamic_flag_spec='${wl}-E' ;; hpux10*) if test "$GCC" = yes && test "$with_gnu_ld" = no; then archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. 
hardcode_minus_L=yes fi ;; hpux11*) if test "$GCC" = yes && test "$with_gnu_ld" = no; then case $host_cpu in hppa*64*) archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_cpu in hppa*64*) archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) # Older versions of the 11.00 compiler do not understand -b yet # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 $as_echo_n "checking if $CC understands -b... " >&6; } if ${lt_cv_prog_compiler__b+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler__b=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -b" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. 
cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler__b=yes fi else lt_cv_prog_compiler__b=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 $as_echo "$lt_cv_prog_compiler__b" >&6; } if test x"$lt_cv_prog_compiler__b" = xyes; then archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi ;; esac fi if test "$with_gnu_ld" = no; then hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: case $host_cpu in hppa*64*|ia64*) hardcode_direct=no hardcode_shlibpath_var=no ;; *) hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) if test "$GCC" = yes; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. # This should be the same for all languages, so no per-tag cache variable. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 $as_echo_n "checking whether the $host_os linker accepts -exported_symbol... 
" >&6; } if ${lt_cv_irix_exported_symbol+:} false; then : $as_echo_n "(cached) " >&6 else save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int foo (void) { return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_irix_exported_symbol=yes else lt_cv_irix_exported_symbol=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 $as_echo "$lt_cv_irix_exported_symbol" >&6; } if test "$lt_cv_irix_exported_symbol" = yes; then archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' fi else archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: inherit_rpath=yes link_all_deplibs=yes ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; newsos6) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' 
hardcode_direct=yes hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: hardcode_shlibpath_var=no ;; *nto* | *qnx*) ;; openbsd*) if test -f /usr/libexec/ld.so; then hardcode_direct=yes hardcode_shlibpath_var=no hardcode_direct_absolute=yes if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' hardcode_libdir_flag_spec='${wl}-rpath,$libdir' export_dynamic_flag_spec='${wl}-E' else case $host_os in openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-R$libdir' ;; *) archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='${wl}-rpath,$libdir' ;; esac fi else ld_shlibs=no fi ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes allow_undefined_flag=unsupported archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' ;; osf3*) if test "$GCC" = yes; then allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else 
allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; osf4* | osf5*) # as osf3* with the addition of -msym flag if test "$GCC" = yes; then allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' # Both c and cxx compiler support -rpath directly hardcode_libdir_flag_spec='-rpath $libdir' fi archive_cmds_need_lc='no' hardcode_libdir_separator=: ;; solaris*) no_undefined_flag=' -z defs' if test "$GCC" = yes; then wlarc='${wl}' archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> 
$lib.exp~ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' else case `$CC -V 2>&1` in *"Compilers 5.0"*) wlarc='' archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ;; *) wlarc='${wl}' archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ;; esac fi hardcode_libdir_flag_spec='-R$libdir' hardcode_shlibpath_var=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. GCC discards it without `$wl', # but is careful enough not to reorder. # Supported since Solaris 2.6 (maybe 2.5.1?) if test "$GCC" = yes; then whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' else whole_archive_flag_spec='-z allextract$convenience -z defaultextract' fi ;; esac link_all_deplibs=yes ;; sunos4*) if test "x$host_vendor" = xsequent; then # Use $CC to link under sequent, because it throws in some extra .o # files that make .init and .fini sections work. 
archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' fi hardcode_libdir_flag_spec='-L$libdir' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; sysv4) case $host_vendor in sni) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes # is this really true??? ;; siemens) ## LD is ld it makes a PLAMLIB ## CC just makes a GrossModule. archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' reload_cmds='$CC -r -o $output$reload_objs' hardcode_direct=no ;; motorola) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=no #Motorola manual says yes, but my tests say they lie ;; esac runpath_var='LD_RUN_PATH' hardcode_shlibpath_var=no ;; sysv4.3*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no export_dynamic_flag_spec='-Bexport' ;; sysv4*MP*) if test -d /usr/nec; then archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes ld_shlibs=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) no_undefined_flag='${wl}-z,text' archive_cmds_need_lc=no hardcode_shlibpath_var=no runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols 
used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. no_undefined_flag='${wl}-z,text' allow_undefined_flag='${wl}-z,nodefs' archive_cmds_need_lc=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='${wl}-R,$libdir' hardcode_libdir_separator=':' link_all_deplibs=yes export_dynamic_flag_spec='${wl}-Bexport' runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; uts4*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; *) ld_shlibs=no ;; esac if test x$host_vendor = xsni; then case $host in sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) export_dynamic_flag_spec='${wl}-Blargedynsym' ;; esac fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 $as_echo "$ld_shlibs" >&6; } test "$ld_shlibs" = no && can_build_shared=no with_gnu_ld=$with_gnu_ld # # Do we need to explicitly link libc? # case "x$archive_cmds_need_lc" in x|xyes) # Assume -lc should be added archive_cmds_need_lc=yes if test "$enable_shared" = yes && test "$GCC" = yes; then case $archive_cmds in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. 
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } if ${lt_cv_archive_cmds_need_lc+:} false; then : $as_echo_n "(cached) " >&6 else $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl pic_flag=$lt_prog_compiler_pic compiler_flags=-v linker_flags=-v verstring= output_objdir=. libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag allow_undefined_flag= if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } then lt_cv_archive_cmds_need_lc=no else lt_cv_archive_cmds_need_lc=yes fi allow_undefined_flag=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 $as_echo "$lt_cv_archive_cmds_need_lc" >&6; } archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc ;; esac fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... 
" >&6; } if test "$GCC" = yes; then case $host_os in darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; *) lt_awk_arg="/^libraries:/" ;; esac case $host_os in mingw* | cegcc*) lt_sed_strip_eq="s,=\([A-Za-z]:\),\1,g" ;; *) lt_sed_strip_eq="s,=/,/,g" ;; esac lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` case $lt_search_path_spec in *\;*) # if the path contains ";" then we assume it to be the separator # otherwise default to the standard path separator (i.e. ":") - it is # assumed that no part of a normal pathname contains ";" but that should # okay in the real world where ";" in dirpaths is itself problematic. lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` ;; *) lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` ;; esac # Ok, now we have the path, separated by spaces, we can step through it # and add multilib dir if necessary. lt_tmp_lt_search_path_spec= lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` for lt_sys_path in $lt_search_path_spec; do if test -d "$lt_sys_path/$lt_multi_os_dir"; then lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" else test -d "$lt_sys_path" && \ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" fi done lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' BEGIN {RS=" "; FS="/|\n";} { lt_foo=""; lt_count=0; for (lt_i = NF; lt_i > 0; lt_i--) { if ($lt_i != "" && $lt_i != ".") { if ($lt_i == "..") { lt_count++; } else { if (lt_count == 0) { lt_foo="/" $lt_i lt_foo; } else { lt_count--; } } } } if (lt_foo != "") { lt_freq[lt_foo]++; } if (lt_freq[lt_foo] == 1) { print lt_foo; } }'` # AWK program above erroneously prepends '/' to C:/dos/paths # for these hosts. 
case $host_os in mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ $SED 's,/\([A-Za-z]:\),\1,g'` ;; esac sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` else sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='${libname}${release}${shared_ext}$major' ;; aix[4-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test "$host_cpu" = ia64; then # AIX 5 supports IA64 library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line `#! .'. This would cause the generated library to # depend on `.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. 
case $host_os in aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # AIX (on Power*) has no versioning support, so currently we can not hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. if test "$aix_use_runtimelinking" = yes; then # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' else # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='${libname}${release}.a $libname.a' soname_spec='${libname}${release}${shared_ext}$major' fi shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. 
finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='${libname}${shared_ext}' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[45]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=".dll" need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' library_names_spec='${libname}.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec="$LIB" if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH. 
sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. 
if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[23].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[01]* | freebsdelf3.[01]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; gnu*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=yes sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. 
version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' if test "X$HPUX_IA64_MODE" = X32; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" fi sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[3-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test "$lt_cv_prog_gnu_ld" = yes; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; # This must be glibc/ELF. 
linux* | k*bsd*-gnu | kopensolaris*-gnu) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH if ${lt_cv_shlibpath_overrides_runpath+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_shlibpath_overrides_runpath=no save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : lt_cv_shlibpath_overrides_runpath=yes fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS libdir=$save_libdir fi shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Add ABI-specific directories to the system library path. 
sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' 
soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd*) version_type=sunos sys_lib_dlsearch_path_spec="/usr/lib" need_lib_prefix=no # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. case $host_os in openbsd3.3 | openbsd3.3.*) need_version=yes ;; *) need_version=no ;; esac library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then case $host_os in openbsd2.[89] | openbsd2.[89].*) shlibpath_overrides_runpath=no ;; *) shlibpath_overrides_runpath=yes ;; esac else shlibpath_overrides_runpath=yes fi ;; os2*) libname_spec='$name' shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' shlibpath_var=LIBPATH ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; 
sunos4*) version_type=sunos library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test "$with_gnu_ld" = yes; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec ;then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' soname_spec='$libname${shared_ext}.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=freebsd-elf need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test "$with_gnu_ld" = yes; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test "$dynamic_linker" = no && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" fi if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action= if test -n "$hardcode_libdir_flag_spec" || test -n "$runpath_var" || test "X$hardcode_automatic" = "Xyes" ; then # We can hardcode non-existent directories. if test "$hardcode_direct" != no && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no && test "$hardcode_minus_L" != no; then # Linking always hardcodes the temporary library directory. 
hardcode_action=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. hardcode_action=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. hardcode_action=unsupported fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 $as_echo "$hardcode_action" >&6; } if test "$hardcode_action" = relink || test "$inherit_rpath" = yes; then # Fast installation is not supported enable_fast_install=no elif test "$shlibpath_overrides_runpath" = yes || test "$enable_shared" = no; then # Fast installation is not necessary enable_fast_install=needless fi if test "x$enable_dlopen" != xyes; then enable_dlopen=unknown enable_dlopen_self=unknown enable_dlopen_self_static=unknown else lt_cv_dlopen=no lt_cv_dlopen_libs= case $host_os in beos*) lt_cv_dlopen="load_add_on" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ;; mingw* | pw32* | cegcc*) lt_cv_dlopen="LoadLibrary" lt_cv_dlopen_libs= ;; cygwin*) lt_cv_dlopen="dlopen" lt_cv_dlopen_libs= ;; darwin*) # if libdl is installed we need to link against it { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if ${ac_cv_lib_dl_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = xyes; then : lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" else lt_cv_dlopen="dyld" lt_cv_dlopen_libs= lt_cv_dlopen_self=yes fi ;; *) ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" if test "x$ac_cv_func_shl_load" = xyes; then : lt_cv_dlopen="shl_load" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 $as_echo_n "checking for shl_load in -ldld... " >&6; } if ${ac_cv_lib_dld_shl_load+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char shl_load (); int main () { return shl_load (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_shl_load=yes else ac_cv_lib_dld_shl_load=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 $as_echo "$ac_cv_lib_dld_shl_load" >&6; } if test "x$ac_cv_lib_dld_shl_load" = xyes; then : lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld" else ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" if test "x$ac_cv_func_dlopen" = xyes; then : lt_cv_dlopen="dlopen" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if ${ac_cv_lib_dl_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = xyes; then : lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 $as_echo_n "checking for dlopen in -lsvld... 
" >&6; } if ${ac_cv_lib_svld_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_svld_dlopen=yes else ac_cv_lib_svld_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 $as_echo "$ac_cv_lib_svld_dlopen" >&6; } if test "x$ac_cv_lib_svld_dlopen" = xyes; then : lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 $as_echo_n "checking for dld_link in -ldld... " >&6; } if ${ac_cv_lib_dld_dld_link+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char dld_link (); int main () { return dld_link (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_dld_link=yes else ac_cv_lib_dld_dld_link=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 $as_echo "$ac_cv_lib_dld_dld_link" >&6; } if test "x$ac_cv_lib_dld_dld_link" = xyes; then : lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld" fi fi fi fi fi fi ;; esac if test "x$lt_cv_dlopen" != xno; then enable_dlopen=yes else enable_dlopen=no fi case $lt_cv_dlopen in dlopen) save_CPPFLAGS="$CPPFLAGS" test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" save_LDFLAGS="$LDFLAGS" wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS="$LIBS" LIBS="$lt_cv_dlopen_libs $LIBS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 $as_echo_n "checking whether a program can dlopen itself... " >&6; } if ${lt_cv_dlopen_self+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : lt_cv_dlopen_self=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line $LINENO "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. 
*/ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif /* When -fvisbility=hidden is used, assume the code has been annotated correspondingly for the symbols needed. */ #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) int fnord () __attribute__((visibility("default"))); #endif int fnord () { return 42; } int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else { if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; else puts (dlerror ()); } /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; esac else : # compilation failed lt_cv_dlopen_self=no fi fi rm -fr conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 $as_echo "$lt_cv_dlopen_self" >&6; } if test "x$lt_cv_dlopen_self" = xyes; then wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 $as_echo_n "checking whether a statically linked program can dlopen itself... 
" >&6; } if ${lt_cv_dlopen_self_static+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : lt_cv_dlopen_self_static=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line $LINENO "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include #endif #include #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif /* When -fvisbility=hidden is used, assume the code has been annotated correspondingly for the symbols needed. */ #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) int fnord () __attribute__((visibility("default"))); #endif int fnord () { return 42; } int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else { if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; else puts (dlerror ()); } /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? 
case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; esac else : # compilation failed lt_cv_dlopen_self_static=no fi fi rm -fr conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 $as_echo "$lt_cv_dlopen_self_static" >&6; } fi CPPFLAGS="$save_CPPFLAGS" LDFLAGS="$save_LDFLAGS" LIBS="$save_LIBS" ;; esac case $lt_cv_dlopen_self in yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; *) enable_dlopen_self=unknown ;; esac case $lt_cv_dlopen_self_static in yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; *) enable_dlopen_self_static=unknown ;; esac fi striplib= old_striplib= { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 $as_echo_n "checking whether stripping libraries is possible... " >&6; } if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" test -z "$striplib" && striplib="$STRIP --strip-unneeded" { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else # FIXME - insert some real tests, host_os isn't really good enough case $host_os in darwin*) if test -n "$STRIP" ; then striplib="$STRIP -x" old_striplib="$STRIP -S" { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ;; esac fi # Report which library types will actually be built { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 $as_echo_n "checking if libtool supports shared libraries... 
" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 $as_echo "$can_build_shared" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 $as_echo_n "checking whether to build shared libraries... " >&6; } test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[4-9]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 $as_echo "$enable_shared" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 $as_echo_n "checking whether to build static libraries... " >&6; } # Make sure either enable_shared or enable_static is yes. test "$enable_shared" = yes || enable_static=yes { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 $as_echo "$enable_static" >&6; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CC="$lt_save_CC" if test -n "$CXX" && ( test "X$CXX" != "Xno" && ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || (test "X$CXX" != "Xg++"))) ; then ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 $as_echo_n "checking how to run the C++ preprocessor... 
" >&6; } if test -z "$CXXCPP"; then if ${ac_cv_prog_CXXCPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CXXCPP needs to be expanded for CXXCPP in "$CXX -E" "/lib/cpp" do ac_preproc_ok=false for ac_cxx_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CXXCPP=$CXXCPP fi CXXCPP=$ac_cv_prog_CXXCPP else ac_cv_prog_CXXCPP=$CXXCPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 $as_echo "$CXXCPP" >&6; } ac_preproc_ok=false for ac_cxx_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu else _lt_caught_CXX_error=yes fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu archive_cmds_need_lc_CXX=no allow_undefined_flag_CXX= always_export_symbols_CXX=no archive_expsym_cmds_CXX= compiler_needs_object_CXX=no export_dynamic_flag_spec_CXX= hardcode_direct_CXX=no hardcode_direct_absolute_CXX=no hardcode_libdir_flag_spec_CXX= hardcode_libdir_separator_CXX= hardcode_minus_L_CXX=no hardcode_shlibpath_var_CXX=unsupported hardcode_automatic_CXX=no inherit_rpath_CXX=no module_cmds_CXX= module_expsym_cmds_CXX= link_all_deplibs_CXX=unknown old_archive_cmds_CXX=$old_archive_cmds reload_flag_CXX=$reload_flag reload_cmds_CXX=$reload_cmds 
no_undefined_flag_CXX= whole_archive_flag_spec_CXX= enable_shared_with_static_runtimes_CXX=no # Source file extension for C++ test sources. ac_ext=cpp # Object file extension for compiled C++ test sources. objext=o objext_CXX=$objext # No sense in running all these tests if we already determined that # the CXX compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. if test "$_lt_caught_CXX_error" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(int, char *[]) { return(0); }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # save warnings/boilerplate of simple test code ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* # Allow CC to be a program name with arguments. 
lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_LD=$LD lt_save_GCC=$GCC GCC=$GXX lt_save_with_gnu_ld=$with_gnu_ld lt_save_path_LD=$lt_cv_path_LD if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx else $as_unset lt_cv_prog_gnu_ld fi if test -n "${lt_cv_path_LDCXX+set}"; then lt_cv_path_LD=$lt_cv_path_LDCXX else $as_unset lt_cv_path_LD fi test -z "${LDCXX+set}" || LD=$LDCXX CC=${CXX-"c++"} CFLAGS=$CXXFLAGS compiler=$CC compiler_CXX=$CC for cc_temp in $compiler""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` if test -n "$compiler"; then # We don't want -fno-exception when compiling C++ code, so set the # no_builtin_flag separately if test "$GXX" = yes; then lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' else lt_prog_compiler_no_builtin_flag_CXX= fi if test "$GXX" = yes; then # Set up default GNU C++ configuration # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. 
ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if ${lt_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if ${lt_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld # Check if GNU C++ uses GNU ld as the underlying linker, since the # archiving commands below assume that GNU ld is being used. 
if test "$with_gnu_ld" = yes; then archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' # If archive_cmds runs LD, not CC, wlarc should be empty # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to # investigate it a little bit more. (MM) wlarc='${wl}' # ancient GNU ld didn't support --whole-archive et. al. if eval "`$CC -print-prog-name=ld` --help 2>&1" | $GREP 'no-whole-archive' > /dev/null; then whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else whole_archive_flag_spec_CXX= fi else with_gnu_ld=no wlarc= # A generic and very simple default shared library creation # command for GNU C++ for the case where it uses the native # linker, instead of GNU ld. If possible, this setting should # overridden to take advantage of the native linker features on # the platform it is being used on. archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else GXX=no with_gnu_ld=no wlarc= fi # PORTME: fill in a description of your system's C++ link characteristics { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... 
" >&6; } ld_shlibs_CXX=yes case $host_os in aix3*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aix[4-9]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) aix_use_runtimelinking=yes break ;; esac done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. archive_cmds_CXX='' hardcode_direct_CXX=yes hardcode_direct_absolute_CXX=yes hardcode_libdir_separator_CXX=':' link_all_deplibs_CXX=yes file_list_spec_CXX='${wl}-f,' if test "$GXX" = yes; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct_CXX=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. 
Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L_CXX=yes hardcode_libdir_flag_spec_CXX='-L$libdir' hardcode_libdir_separator_CXX= fi esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi export_dynamic_flag_spec_CXX='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to # export. always_export_symbols_CXX=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. allow_undefined_flag_CXX='-berok' # Determine the default libpath from the value encoded in an empty # executable. if test "${lt_cv_aix_libpath+set}" = set; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath__CXX+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. 
if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX="/usr/lib:/lib" fi fi aix_libpath=$lt_cv_aix_libpath__CXX fi hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib' allow_undefined_flag_CXX="-z nodefs" archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. if test "${lt_cv_aix_libpath+set}" = set; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath__CXX+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. 
if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX="/usr/lib:/lib" fi fi aix_libpath=$lt_cv_aix_libpath__CXX fi hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag_CXX=' ${wl}-bernotok' allow_undefined_flag_CXX=' ${wl}-berok' if test "$with_gnu_ld" = yes; then # We only use this code for GNU lds that support --whole-archive. whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec_CXX='$convenience' fi archive_cmds_need_lc_CXX=yes # This is similar to how AIX traditionally builds its shared # libraries. archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then allow_undefined_flag_CXX=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else ld_shlibs_CXX=no fi ;; chorus*) case $cc_basename in *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; cygwin* | mingw* | pw32* | cegcc*) case $GXX,$cc_basename in ,cl* | no,cl*) # Native MSVC # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. 
hardcode_libdir_flag_spec_CXX=' ' allow_undefined_flag_CXX=unsupported always_export_symbols_CXX=yes file_list_spec_CXX='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; else $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' enable_shared_with_static_runtimes_CXX=yes # Don't use ranlib old_postinstall_cmds_CXX='chmod 644 $oldlib' postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile="$lt_outputfile.exe" lt_tool_outputfile="$lt_tool_outputfile.exe" ;; esac~ func_to_tool_file "$lt_outputfile"~ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # g++ # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, # as there is no search path for DLLs. 
hardcode_libdir_flag_spec_CXX='-L$libdir' export_dynamic_flag_spec_CXX='${wl}--export-all-symbols' allow_undefined_flag_CXX=unsupported always_export_symbols_CXX=no enable_shared_with_static_runtimes_CXX=yes if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else ld_shlibs_CXX=no fi ;; esac ;; darwin* | rhapsody*) archive_cmds_need_lc_CXX=no hardcode_direct_CXX=no hardcode_automatic_CXX=yes hardcode_shlibpath_var_CXX=unsupported if test "$lt_cv_ld_force_load" = "yes"; then whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' else whole_archive_flag_spec_CXX='' fi link_all_deplibs_CXX=yes allow_undefined_flag_CXX="$_lt_dar_allow_undefined" case $cc_basename in ifort*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test "$_lt_dar_can_shared" = "yes"; then output_verbose_link_cmd=func_echo_all archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs 
\$compiler_flags${_lt_dsymutil}" archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" module_expsym_cmds_CXX="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" if test "$lt_cv_apple_cc_single_mod" != "yes"; then archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" fi else ld_shlibs_CXX=no fi ;; dgux*) case $cc_basename in ec++*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; ghcx*) # Green Hills C++ Compiler # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; freebsd2.*) # C++ shared libraries reported to be fairly broken before # switch to ELF ld_shlibs_CXX=no ;; freebsd-elf*) archive_cmds_need_lc_CXX=no ;; freebsd* | dragonfly*) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions ld_shlibs_CXX=yes ;; gnu*) ;; haiku*) archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' link_all_deplibs_CXX=yes ;; hpux9*) hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' hardcode_libdir_separator_CXX=: 
export_dynamic_flag_spec_CXX='${wl}-E' hardcode_direct_CXX=yes hardcode_minus_L_CXX=yes # Not in the search PATH, # but as the default # location of the library. case $cc_basename in CC*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aCC*) archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test "$GXX" = yes; then archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; hpux10*|hpux11*) if test $with_gnu_ld = no; then hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' hardcode_libdir_separator_CXX=: case $host_cpu in hppa*64*|ia64*) ;; *) export_dynamic_flag_spec_CXX='${wl}-E' ;; esac fi case $host_cpu in hppa*64*|ia64*) hardcode_direct_CXX=no hardcode_shlibpath_var_CXX=no ;; *) hardcode_direct_CXX=yes hardcode_direct_absolute_CXX=yes hardcode_minus_L_CXX=yes # Not in the search PATH, # but as the default # location of the library. 
;; esac case $cc_basename in CC*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aCC*) case $host_cpu in hppa*64*) archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test "$GXX" = yes; then if test $with_gnu_ld = no; then case $host_cpu in hppa*64*) archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac fi else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; interix[3-9]*) hardcode_direct_CXX=no hardcode_shlibpath_var_CXX=no hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' 
export_dynamic_flag_spec_CXX='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' archive_expsym_cmds_CXX='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; irix5* | irix6*) case $cc_basename in CC*) # SGI C++ archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' # Archives containing C++ object files must be created using # "CC -ar", where "CC" is the IRIX C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' ;; *) if test "$GXX" = yes; then if test "$with_gnu_ld" = no; then archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' fi fi link_all_deplibs_CXX=yes ;; esac hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator_CXX=: inherit_rpath_CXX=yes ;; linux* | k*bsd*-gnu | kopensolaris*-gnu) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
# # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' # Archives containing C++ object files must be created using # "CC -Bstatic", where "CC" is the KAI C++ compiler. old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; icpc* | ecpc* ) # Intel C++ with_gnu_ld=yes # version 8.0 and above of icpc choke on multiply defined symbols # if we add $predep_objects and $postdep_objects, however 7.1 and # earlier do not add the objects themselves. 
case `$CC -V 2>&1` in *"Version 7."*) archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 8.0 or newer tmp_idyn= case $host_cpu in ia64*) tmp_idyn=' -i_dynamic';; esac archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; esac archive_cmds_need_lc_CXX=no hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' ;; pgCC* | pgcpp*) # Portland Group C++ compiler case `$CC -V` in *pgCC\ [1-5].* | *pgcpp\ [1-5].*) prelink_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' old_archive_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ $RANLIB $oldlib' archive_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' archive_expsym_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience 
$postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; *) # Version 6 and above use weak symbols archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; esac hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' whole_archive_flag_spec_CXX='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' ;; cxx*) # Compaq C++ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec_CXX='-rpath $libdir' hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. 
output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' ;; xl* | mpixl* | bgxl*) # IBM XL 8.0 on PPC, with GNU ld hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' export_dynamic_flag_spec_CXX='${wl}--export-dynamic' archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 no_undefined_flag_CXX=' -zdefs' archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' archive_expsym_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' hardcode_libdir_flag_spec_CXX='-R$libdir' whole_archive_flag_spec_CXX='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' compiler_needs_object_CXX=yes # Not sure whether something based on # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 # would be better. output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. 
This is # necessary to make sure instantiated templates are included # in the archive. old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ;; esac ;; esac ;; lynxos*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; m88k*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; mvs*) case $cc_basename in cxx*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' wlarc= hardcode_libdir_flag_spec_CXX='-R$libdir' hardcode_direct_CXX=yes hardcode_shlibpath_var_CXX=no fi # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; *nto* | *qnx*) ld_shlibs_CXX=yes ;; openbsd2*) # C++ shared libraries are fairly broken ld_shlibs_CXX=no ;; openbsd*) if test -f /usr/libexec/ld.so; then hardcode_direct_CXX=yes hardcode_shlibpath_var_CXX=no hardcode_direct_absolute_CXX=yes archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' export_dynamic_flag_spec_CXX='${wl}-E' whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' fi output_verbose_link_cmd=func_echo_all else ld_shlibs_CXX=no fi ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) # Kuck and Associates, Inc. 
(KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' hardcode_libdir_separator_CXX=: # Archives containing C++ object files must be created using # the KAI C++ compiler. case $host in osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;; esac ;; RCC*) # Rational C++ 2.4.1 # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; cxx*) case $host in osf3*) allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' ;; *) allow_undefined_flag_CXX=' -expect_unresolved \*' archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ echo "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations 
-o $lib~ $RM $lib.exp' hardcode_libdir_flag_spec_CXX='-rpath $libdir' ;; esac hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test "$GXX" = yes && test "$with_gnu_ld" = no; then allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' case $host in osf3*) archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; *) archive_cmds_CXX='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; esac hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; psos*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; lcc*) # Lucid # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ archive_cmds_need_lc_CXX=yes no_undefined_flag_CXX=' -zdefs' archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' hardcode_libdir_flag_spec_CXX='-R$libdir' hardcode_shlibpath_var_CXX=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. # Supported since Solaris 2.6 (maybe 2.5.1?) whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' ;; esac link_all_deplibs_CXX=yes output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ;; gcx*) # Green Hills C++ Compiler archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' # The C++ compiler must be used to create the archive. 
old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; *) # GNU C++ compiler with Solaris linker if test "$GXX" = yes && test "$with_gnu_ld" = no; then no_undefined_flag_CXX=' ${wl}-z ${wl}defs' if $CC --version | $GREP -v '^2\.7' > /dev/null; then archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # g++ 2.7 appears to require `-G' NOT `-shared' on this # platform. archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' fi hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir' case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' ;; esac fi ;; esac ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) no_undefined_flag_CXX='${wl}-z,text' archive_cmds_need_lc_CXX=no hardcode_shlibpath_var_CXX=no runpath_var='LD_RUN_PATH' case $cc_basename in CC*) archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. 
no_undefined_flag_CXX='${wl}-z,text' allow_undefined_flag_CXX='${wl}-z,nodefs' archive_cmds_need_lc_CXX=no hardcode_shlibpath_var_CXX=no hardcode_libdir_flag_spec_CXX='${wl}-R,$libdir' hardcode_libdir_separator_CXX=':' link_all_deplibs_CXX=yes export_dynamic_flag_spec_CXX='${wl}-Bexport' runpath_var='LD_RUN_PATH' case $cc_basename in CC*) archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~ '"$old_archive_cmds_CXX" reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~ '"$reload_cmds_CXX" ;; *) archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; vxworks*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 $as_echo "$ld_shlibs_CXX" >&6; } test "$ld_shlibs_CXX" = no && can_build_shared=no GCC_CXX="$GXX" LD_CXX="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... 
# Dependencies to place before and after the object being linked: predep_objects_CXX= postdep_objects_CXX= predeps_CXX= postdeps_CXX= compiler_lib_search_path_CXX= cat > conftest.$ac_ext <<_LT_EOF class Foo { public: Foo (void) { a = 0; } private: int a; }; _LT_EOF _lt_libdeps_save_CFLAGS=$CFLAGS case "$CC $CFLAGS " in #( *\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; *\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; *\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; esac if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then # Parse the compiler output and extract the necessary # objects, libraries and library flags. # Sentinel used to keep track of whether or not we are before # the conftest object file. pre_test_object_deps_done=no for p in `eval "$output_verbose_link_cmd"`; do case ${prev}${p} in -L* | -R* | -l*) # Some compilers place space between "-{L,R}" and the path. # Remove the space. if test $p = "-L" || test $p = "-R"; then prev=$p continue fi # Expand the sysroot to ease extracting the directories later. if test -z "$prev"; then case $p in -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; esac fi case $p in =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; esac if test "$pre_test_object_deps_done" = no; then case ${prev} in -L | -R) # Internal compiler library paths should come after those # provided the user. The postdeps already come after the # user supplied libs so there is no need to process them. 
if test -z "$compiler_lib_search_path_CXX"; then compiler_lib_search_path_CXX="${prev}${p}" else compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}" fi ;; # The "-l" case would never come before the object being # linked, so don't bother handling this case. esac else if test -z "$postdeps_CXX"; then postdeps_CXX="${prev}${p}" else postdeps_CXX="${postdeps_CXX} ${prev}${p}" fi fi prev= ;; *.lto.$objext) ;; # Ignore GCC LTO objects *.$objext) # This assumes that the test object file only shows up # once in the compiler output. if test "$p" = "conftest.$objext"; then pre_test_object_deps_done=yes continue fi if test "$pre_test_object_deps_done" = no; then if test -z "$predep_objects_CXX"; then predep_objects_CXX="$p" else predep_objects_CXX="$predep_objects_CXX $p" fi else if test -z "$postdep_objects_CXX"; then postdep_objects_CXX="$p" else postdep_objects_CXX="$postdep_objects_CXX $p" fi fi ;; *) ;; # Ignore the rest. esac done # Clean up. rm -f a.out a.exe else echo "libtool.m4: error: problem compiling CXX test program" fi $RM -f confest.$objext CFLAGS=$_lt_libdeps_save_CFLAGS # PORTME: override above test on systems where it is broken case $host_os in interix[3-9]*) # Interix 3.5 installs completely hosed .la files for C++, so rather than # hack all around it, let's just trust "g++" to DTRT. predep_objects_CXX= postdep_objects_CXX= postdeps_CXX= ;; linux*) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 # The more standards-conforming stlport4 library is # incompatible with the Cstd library. Avoid specifying # it if it's in CXXFLAGS. Ignore libCrun as # -library=stlport4 depends on it. case " $CXX $CXXFLAGS " in *" -library=stlport4 "*) solaris_use_stlport4=yes ;; esac if test "$solaris_use_stlport4" != yes; then postdeps_CXX='-library=Cstd -library=Crun' fi ;; esac ;; solaris*) case $cc_basename in CC* | sunCC*) # The more standards-conforming stlport4 library is # incompatible with the Cstd library. 
Avoid specifying # it if it's in CXXFLAGS. Ignore libCrun as # -library=stlport4 depends on it. case " $CXX $CXXFLAGS " in *" -library=stlport4 "*) solaris_use_stlport4=yes ;; esac # Adding this requires a known-good setup of shared libraries for # Sun compiler versions before 5.6, else PIC objects from an old # archive will be linked into the output, leading to subtle bugs. if test "$solaris_use_stlport4" != yes; then postdeps_CXX='-library=Cstd -library=Crun' fi ;; esac ;; esac case " $postdeps_CXX " in *" -lc "*) archive_cmds_need_lc_CXX=no ;; esac compiler_lib_search_dirs_CXX= if test -n "${compiler_lib_search_path_CXX}"; then compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` fi lt_prog_compiler_wl_CXX= lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX= # C++ specific cases for pic, static, wl, etc. if test "$GXX" = yes; then lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='-static' case $host_os in aix*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static_CXX='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support lt_prog_compiler_pic_CXX='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). 
# Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries lt_prog_compiler_pic_CXX='-DDLL_EXPORT' ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic_CXX='-fno-common' ;; *djgpp*) # DJGPP does not support shared libraries at all lt_prog_compiler_pic_CXX= ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. lt_prog_compiler_static_CXX= ;; interix[3-9]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic_CXX=-Kconform_pic fi ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) ;; *) lt_prog_compiler_pic_CXX='-fPIC' ;; esac ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic_CXX='-fPIC -shared' ;; *) lt_prog_compiler_pic_CXX='-fPIC' ;; esac else case $host_os in aix[4-9]*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_prog_compiler_static_CXX='-Bstatic' else lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' fi ;; chorus*) case $cc_basename in cxch68*) # Green Hills C++ Compiler # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ;; esac ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). 
lt_prog_compiler_pic_CXX='-DDLL_EXPORT' ;; dgux*) case $cc_basename in ec++*) lt_prog_compiler_pic_CXX='-KPIC' ;; ghcx*) # Green Hills C++ Compiler lt_prog_compiler_pic_CXX='-pic' ;; *) ;; esac ;; freebsd* | dragonfly*) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' if test "$host_cpu" != ia64; then lt_prog_compiler_pic_CXX='+Z' fi ;; aCC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) lt_prog_compiler_pic_CXX='+Z' ;; esac ;; *) ;; esac ;; interix*) # This is c89, which is MS Visual C++ (no shared libs) # Anyone wants to do a port? ;; irix5* | irix6* | nonstopux*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='-non_shared' # CC pic flag -KPIC is the default. ;; *) ;; esac ;; linux* | k*bsd*-gnu | kopensolaris*-gnu) case $cc_basename in KCC*) # KAI C++ Compiler lt_prog_compiler_wl_CXX='--backend -Wl,' lt_prog_compiler_pic_CXX='-fPIC' ;; ecpc* ) # old Intel C++ for x86_64 which still supported -KPIC. lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-static' ;; icpc* ) # Intel C++, used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-fPIC' lt_prog_compiler_static_CXX='-static' ;; pgCC* | pgcpp*) # Portland Group C++ compiler lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-fpic' lt_prog_compiler_static_CXX='-Bstatic' ;; cxx*) # Compaq C++ # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. 
lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX='-non_shared' ;; xlc* | xlC* | bgxl[cC]* | mpixl[cC]*) # IBM XL 8.0, 9.0 on PPC and BlueGene lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-qpic' lt_prog_compiler_static_CXX='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' lt_prog_compiler_wl_CXX='-Qoption ld ' ;; esac ;; esac ;; lynxos*) ;; m88k*) ;; mvs*) case $cc_basename in cxx*) lt_prog_compiler_pic_CXX='-W c,exportall' ;; *) ;; esac ;; netbsd*) ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic_CXX='-fPIC -shared' ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) lt_prog_compiler_wl_CXX='--backend -Wl,' ;; RCC*) # Rational C++ 2.4.1 lt_prog_compiler_pic_CXX='-pic' ;; cxx*) # Digital/Compaq C++ lt_prog_compiler_wl_CXX='-Wl,' # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. 
lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX='-non_shared' ;; *) ;; esac ;; psos*) ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' lt_prog_compiler_wl_CXX='-Qoption ld ' ;; gcx*) # Green Hills C++ Compiler lt_prog_compiler_pic_CXX='-PIC' ;; *) ;; esac ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x lt_prog_compiler_pic_CXX='-pic' lt_prog_compiler_static_CXX='-Bstatic' ;; lcc*) # Lucid lt_prog_compiler_pic_CXX='-pic' ;; *) ;; esac ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 lt_prog_compiler_pic_CXX='-KPIC' ;; *) ;; esac ;; vxworks*) ;; *) lt_prog_compiler_can_build_shared_CXX=no ;; esac fi case $host_os in # For platforms which do not support PIC, -DPIC is meaningless: *djgpp*) lt_prog_compiler_pic_CXX= ;; *) lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } if ${lt_cv_prog_compiler_pic_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 $as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; } lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... 
" >&6; } if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works_CXX=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works_CXX=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 $as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; } if test x"$lt_cv_prog_compiler_pic_works_CXX" = xyes; then case $lt_prog_compiler_pic_CXX in "" | " "*) ;; *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; esac else lt_prog_compiler_pic_CXX= lt_prog_compiler_can_build_shared_CXX=no fi fi # # Check to make sure the static flag actually works. 
# wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if ${lt_cv_prog_compiler_static_works_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works_CXX=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_static_works_CXX=yes fi else lt_cv_prog_compiler_static_works_CXX=yes fi fi $RM -r conftest* LDFLAGS="$save_LDFLAGS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 $as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; } if test x"$lt_cv_prog_compiler_static_works_CXX" = xyes; then : else lt_prog_compiler_static_CXX= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o_CXX=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. 
# Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o_CXX=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 $as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o_CXX=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. 
# Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o_CXX=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 $as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } hard_links="nottested" if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... 
" >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test "$hard_links" = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' case $host_os in aix[4-9]*) # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to AIX nm, but means don't demangle with GNU nm # Also, AIX nm treats weak defined symbols like other global defined # symbols, whereas GNU nm marks them as "W". 
if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi ;; pw32*) export_symbols_cmds_CXX="$ltdll_cmds" ;; cygwin* | mingw* | cegcc*) case $cc_basename in cl*) exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' ;; *) export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' ;; esac ;; *) export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 $as_echo "$ld_shlibs_CXX" >&6; } test "$ld_shlibs_CXX" = no && can_build_shared=no with_gnu_ld_CXX=$with_gnu_ld # # Do we need to explicitly link libc? # case "x$archive_cmds_need_lc_CXX" in x|xyes) # Assume -lc should be added archive_cmds_need_lc_CXX=yes if test "$enable_shared" = yes && test "$GCC" = yes; then case $archive_cmds_CXX in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... 
" >&6; } if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then : $as_echo_n "(cached) " >&6 else $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl_CXX pic_flag=$lt_prog_compiler_pic_CXX compiler_flags=-v linker_flags=-v verstring= output_objdir=. libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag_CXX allow_undefined_flag_CXX= if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } then lt_cv_archive_cmds_need_lc_CXX=no else lt_cv_archive_cmds_need_lc_CXX=yes fi allow_undefined_flag_CXX=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 $as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; } archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX ;; esac fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... 
" >&6; } library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='${libname}${release}${shared_ext}$major' ;; aix[4-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test "$host_cpu" = ia64; then # AIX 5 supports IA64 library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line `#! .'. This would cause the generated library to # depend on `.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # AIX (on Power*) has no versioning support, so currently we can not hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. 
if test "$aix_use_runtimelinking" = yes; then # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' else # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='${libname}${release}.a $libname.a' soname_spec='${libname}${release}${shared_ext}$major' fi shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. 
finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='${libname}${shared_ext}' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[45]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=".dll" need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' library_names_spec='${libname}.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec="$LIB" if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH. 
sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. 
if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[23].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[01]* | freebsdelf3.[01]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; gnu*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=yes sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. 
version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' if test "X$HPUX_IA64_MODE" = X32; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" fi sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[3-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test "$lt_cv_prog_gnu_ld" = yes; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; # This must be glibc/ELF. 
linux* | k*bsd*-gnu | kopensolaris*-gnu) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH if ${lt_cv_shlibpath_overrides_runpath+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_shlibpath_overrides_runpath=no save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\"" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : lt_cv_shlibpath_overrides_runpath=yes fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS libdir=$save_libdir fi shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Add ABI-specific directories to the system library path. 
sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' 
soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd*) version_type=sunos sys_lib_dlsearch_path_spec="/usr/lib" need_lib_prefix=no # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. case $host_os in openbsd3.3 | openbsd3.3.*) need_version=yes ;; *) need_version=no ;; esac library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then case $host_os in openbsd2.[89] | openbsd2.[89].*) shlibpath_overrides_runpath=no ;; *) shlibpath_overrides_runpath=yes ;; esac else shlibpath_overrides_runpath=yes fi ;; os2*) libname_spec='$name' shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' shlibpath_var=LIBPATH ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; 
sunos4*) version_type=sunos library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test "$with_gnu_ld" = yes; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec ;then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' soname_spec='$libname${shared_ext}.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=freebsd-elf need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test "$with_gnu_ld" = yes; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test "$dynamic_linker" = no && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" fi if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action_CXX= if test -n "$hardcode_libdir_flag_spec_CXX" || test -n "$runpath_var_CXX" || test "X$hardcode_automatic_CXX" = "Xyes" ; then # We can hardcode non-existent directories. if test "$hardcode_direct_CXX" != no && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" != no && test "$hardcode_minus_L_CXX" != no; then # Linking always hardcodes the temporary library directory. 
hardcode_action_CXX=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. hardcode_action_CXX=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. hardcode_action_CXX=unsupported fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5 $as_echo "$hardcode_action_CXX" >&6; } if test "$hardcode_action_CXX" = relink || test "$inherit_rpath_CXX" = yes; then # Fast installation is not supported enable_fast_install=no elif test "$shlibpath_overrides_runpath" = yes || test "$enable_shared" = no; then # Fast installation is not necessary enable_fast_install=needless fi fi # test -n "$compiler" CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS LDCXX=$LD LD=$lt_save_LD GCC=$lt_save_GCC with_gnu_ld=$lt_save_with_gnu_ld lt_cv_path_LDCXX=$lt_cv_path_LD lt_cv_path_LD=$lt_save_path_LD lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld fi # test "$_lt_caught_CXX_error" != yes ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_config_commands="$ac_config_commands libtool" # Only expand once: if test "x$CC" != xcc; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC and cc understand -c and -o together" >&5 $as_echo_n "checking whether $CC and cc understand -c and -o together... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cc understands -c and -o together" >&5 $as_echo_n "checking whether cc understands -c and -o together... " >&6; } fi set dummy $CC; ac_cc=`$as_echo "$2" | sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'` if eval \${ac_cv_prog_cc_${ac_cc}_c_o+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF # Make sure it works both with $CC and with simple cc. 
# We do the test twice because some compilers refuse to overwrite an # existing .o file with -o, though they will create one. ac_try='$CC -c conftest.$ac_ext -o conftest2.$ac_objext >&5' rm -f conftest2.* if { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -f conftest2.$ac_objext && { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then eval ac_cv_prog_cc_${ac_cc}_c_o=yes if test "x$CC" != xcc; then # Test first that cc exists at all. if { ac_try='cc -c conftest.$ac_ext >&5' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then ac_try='cc -c conftest.$ac_ext -o conftest2.$ac_objext >&5' rm -f conftest2.* if { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -f conftest2.$ac_objext && { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? 
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then # cc works too. : else # cc exists but doesn't like -o. eval ac_cv_prog_cc_${ac_cc}_c_o=no fi fi fi else eval ac_cv_prog_cc_${ac_cc}_c_o=no fi rm -f core conftest* fi if eval test \$ac_cv_prog_cc_${ac_cc}_c_o = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "#define NO_MINUS_C_MINUS_O 1" >>confdefs.h fi # FIXME: we rely on the cache variable name because # there is no other way. set dummy $CC am_cc=`echo $2 | sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'` eval am_t=\$ac_cv_prog_cc_${am_cc}_c_o if test "$am_t" != yes; then # Losing compiler, so override with the script. # FIXME: It is wrong to rewrite CC. # But if we don't then we get into trouble of one sort or another. # A longer-term fix would be to have automake use am__CC in this case, # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" CC="$am_aux_dir/compile $CC" fi # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. 
[\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if ${lt_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if ${lt_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. 
case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld for ac_header in stdint.h stdlib.h string.h sys/socket.h syslog.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int i; _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : else as_fn_error $? "C++ compiler missing or inoperational" "$LINENO" 5 fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu #Make sure all types are covered { $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5 $as_echo_n "checking for stdbool.h that conforms to C99... " >&6; } if ${ac_cv_header_stdbool_h+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #ifndef bool "error: bool is not defined" #endif #ifndef false "error: false is not defined" #endif #if false "error: false is not 0" #endif #ifndef true "error: true is not defined" #endif #if true != 1 "error: true is not 1" #endif #ifndef __bool_true_false_are_defined "error: __bool_true_false_are_defined is not defined" #endif struct s { _Bool s: 1; _Bool t; } s; char a[true == 1 ? 1 : -1]; char b[false == 0 ? 1 : -1]; char c[__bool_true_false_are_defined == 1 ? 
1 : -1]; char d[(bool) 0.5 == true ? 1 : -1]; /* See body of main program for 'e'. */ char f[(_Bool) 0.0 == false ? 1 : -1]; char g[true]; char h[sizeof (_Bool)]; char i[sizeof s.t]; enum { j = false, k = true, l = false * true, m = true * 256 }; /* The following fails for HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */ _Bool n[m]; char o[sizeof n == m * sizeof n[0] ? 1 : -1]; char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1]; /* Catch a bug in an HP-UX C compiler. See http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html */ _Bool q = true; _Bool *pq = &q; int main () { bool e = &s; *pq |= q; *pq |= ! q; /* Refer to every declared value, to avoid compiler optimizations. */ return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l + !m + !n + !o + !p + !q + !pq); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdbool_h=yes else ac_cv_header_stdbool_h=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5 $as_echo "$ac_cv_header_stdbool_h" >&6; } ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default" if test "x$ac_cv_type__Bool" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE__BOOL 1 _ACEOF fi if test $ac_cv_header_stdbool_h = yes; then $as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h fi ac_fn_c_find_intX_t "$LINENO" "32" "ac_cv_c_int32_t" case $ac_cv_c_int32_t in #( no|yes) ;; #( *) cat >>confdefs.h <<_ACEOF #define int32_t $ac_cv_c_int32_t _ACEOF ;; esac ac_fn_c_find_intX_t "$LINENO" "64" "ac_cv_c_int64_t" case $ac_cv_c_int64_t in #( no|yes) ;; #( *) cat >>confdefs.h <<_ACEOF #define int64_t $ac_cv_c_int64_t _ACEOF ;; esac ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default" if test "x$ac_cv_type_size_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define size_t unsigned int _ACEOF fi 
ac_fn_c_check_type "$LINENO" "ssize_t" "ac_cv_type_ssize_t" "$ac_includes_default" if test "x$ac_cv_type_ssize_t" = xyes; then : else cat >>confdefs.h <<_ACEOF #define ssize_t int _ACEOF fi ac_fn_c_find_uintX_t "$LINENO" "32" "ac_cv_c_uint32_t" case $ac_cv_c_uint32_t in #( no|yes) ;; #( *) $as_echo "#define _UINT32_T 1" >>confdefs.h cat >>confdefs.h <<_ACEOF #define uint32_t $ac_cv_c_uint32_t _ACEOF ;; esac ac_fn_c_find_uintX_t "$LINENO" "64" "ac_cv_c_uint64_t" case $ac_cv_c_uint64_t in #( no|yes) ;; #( *) $as_echo "#define _UINT64_T 1" >>confdefs.h cat >>confdefs.h <<_ACEOF #define uint64_t $ac_cv_c_uint64_t _ACEOF ;; esac ac_fn_c_find_uintX_t "$LINENO" "8" "ac_cv_c_uint8_t" case $ac_cv_c_uint8_t in #( no|yes) ;; #( *) $as_echo "#define _UINT8_T 1" >>confdefs.h cat >>confdefs.h <<_ACEOF #define uint8_t $ac_cv_c_uint8_t _ACEOF ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for error_at_line" >&5 $as_echo_n "checking for error_at_line... " >&6; } if ${ac_cv_lib_error_at_line+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { error_at_line (0, 0, "", 0, "an error occurred"); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_error_at_line=yes else ac_cv_lib_error_at_line=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_error_at_line" >&5 $as_echo "$ac_cv_lib_error_at_line" >&6; } if test $ac_cv_lib_error_at_line = no; then case " $LIBOBJS " in *" error.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS error.$ac_objext" ;; esac fi for ac_header in stdlib.h do : ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default" if test "x$ac_cv_header_stdlib_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STDLIB_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible malloc" >&5 $as_echo_n "checking for GNU libc compatible malloc... " >&6; } if ${ac_cv_func_malloc_0_nonnull+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_malloc_0_nonnull=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined STDC_HEADERS || defined HAVE_STDLIB_H # include #else char *malloc (); #endif int main () { return ! 
malloc (0); ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_malloc_0_nonnull=yes else ac_cv_func_malloc_0_nonnull=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_malloc_0_nonnull" >&5 $as_echo "$ac_cv_func_malloc_0_nonnull" >&6; } if test $ac_cv_func_malloc_0_nonnull = yes; then : $as_echo "#define HAVE_MALLOC 1" >>confdefs.h else $as_echo "#define HAVE_MALLOC 0" >>confdefs.h case " $LIBOBJS " in *" malloc.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS malloc.$ac_objext" ;; esac $as_echo "#define malloc rpl_malloc" >>confdefs.h fi for ac_header in stdlib.h do : ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default" if test "x$ac_cv_header_stdlib_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_STDLIB_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible realloc" >&5 $as_echo_n "checking for GNU libc compatible realloc... " >&6; } if ${ac_cv_func_realloc_0_nonnull+:} false; then : $as_echo_n "(cached) " >&6 else if test "$cross_compiling" = yes; then : ac_cv_func_realloc_0_nonnull=no else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #if defined STDC_HEADERS || defined HAVE_STDLIB_H # include #else char *realloc (); #endif int main () { return ! 
realloc (0, 0); ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_func_realloc_0_nonnull=yes else ac_cv_func_realloc_0_nonnull=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_realloc_0_nonnull" >&5 $as_echo "$ac_cv_func_realloc_0_nonnull" >&6; } if test $ac_cv_func_realloc_0_nonnull = yes; then : $as_echo "#define HAVE_REALLOC 1" >>confdefs.h else $as_echo "#define HAVE_REALLOC 0" >>confdefs.h case " $LIBOBJS " in *" realloc.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS realloc.$ac_objext" ;; esac $as_echo "#define realloc rpl_realloc" >>confdefs.h fi for ac_func in getpass memset socket strchr strdup strtol strtoul do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in dlfcn.h do : ac_fn_c_check_header_mongrel "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default" if test "x$ac_cv_header_dlfcn_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_DLFCN_H 1 _ACEOF fi done #Check for openssl development libs, we are using in simc_lsmplugin { $as_echo "$as_me:${as_lineno-$LINENO}: checking for MD5_Final in -lcrypto" >&5 $as_echo_n "checking for MD5_Final in -lcrypto... " >&6; } if ${ac_cv_lib_crypto_MD5_Final+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lcrypto $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char MD5_Final (); int main () { return MD5_Final (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_crypto_MD5_Final=yes else ac_cv_lib_crypto_MD5_Final=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_crypto_MD5_Final" >&5 $as_echo "$ac_cv_lib_crypto_MD5_Final" >&6; } if test "x$ac_cv_lib_crypto_MD5_Final" = xyes; then : SSL_LIBS=-lcrypto else as_fn_error $? "Missing openssl-devel libraries" "$LINENO" 5 fi #Check for json parser yajl for ac_header in yajl/yajl_gen.h yajl/yajl_parse.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF else as_fn_error $? "Missing yajl development headers" "$LINENO" 5 fi done for ac_header in yajl/yajl_version.h do : ac_fn_c_check_header_mongrel "$LINENO" "yajl/yajl_version.h" "ac_cv_header_yajl_yajl_version_h" "$ac_includes_default" if test "x$ac_cv_header_yajl_yajl_version_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_YAJL_YAJL_VERSION_H 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for yajl_parse in -lyajl" >&5 $as_echo_n "checking for yajl_parse in -lyajl... " >&6; } if ${ac_cv_lib_yajl_yajl_parse+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lyajl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char yajl_parse (); int main () { return yajl_parse (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_yajl_yajl_parse=yes else ac_cv_lib_yajl_yajl_parse=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_yajl_yajl_parse" >&5 $as_echo "$ac_cv_lib_yajl_yajl_parse" >&6; } if test "x$ac_cv_lib_yajl_yajl_parse" = xyes; then : YAJL_LIBS=-lyajl else as_fn_error $? "Missing yajl library" "$LINENO" 5 fi if test "$prefix" = "/usr" && test "$localstatedir" = '${prefix}/var' ; then localstatedir='/var' fi if test "$prefix" = "/usr" && test "$sysconfdir" = '${prefix}/etc' ; then sysconfdir='/etc' fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBXML" >&5 $as_echo_n "checking for LIBXML... " >&6; } if test -n "$LIBXML_CFLAGS"; then pkg_cv_LIBXML_CFLAGS="$LIBXML_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libxml-2.0\""; } >&5 ($PKG_CONFIG --exists --print-errors "libxml-2.0") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LIBXML_CFLAGS=`$PKG_CONFIG --cflags "libxml-2.0" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$LIBXML_LIBS"; then pkg_cv_LIBXML_LIBS="$LIBXML_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libxml-2.0\""; } >&5 ($PKG_CONFIG --exists --print-errors "libxml-2.0") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LIBXML_LIBS=`$PKG_CONFIG --libs "libxml-2.0" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then LIBXML_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libxml-2.0" 2>&1` else LIBXML_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libxml-2.0" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$LIBXML_PKG_ERRORS" >&5 as_fn_error $? "Package requirements (libxml-2.0) were not met: $LIBXML_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables LIBXML_CFLAGS and LIBXML_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details." "$LINENO" 5 elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables LIBXML_CFLAGS and LIBXML_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details" "$LINENO" 5; } else LIBXML_CFLAGS=$pkg_cv_LIBXML_CFLAGS LIBXML_LIBS=$pkg_cv_LIBXML_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBGLIB" >&5 $as_echo_n "checking for LIBGLIB... 
" >&6; } if test -n "$LIBGLIB_CFLAGS"; then pkg_cv_LIBGLIB_CFLAGS="$LIBGLIB_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"glib-2.0 >= 2.22.5\""; } >&5 ($PKG_CONFIG --exists --print-errors "glib-2.0 >= 2.22.5") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LIBGLIB_CFLAGS=`$PKG_CONFIG --cflags "glib-2.0 >= 2.22.5" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$LIBGLIB_LIBS"; then pkg_cv_LIBGLIB_LIBS="$LIBGLIB_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"glib-2.0 >= 2.22.5\""; } >&5 ($PKG_CONFIG --exists --print-errors "glib-2.0 >= 2.22.5") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LIBGLIB_LIBS=`$PKG_CONFIG --libs "glib-2.0 >= 2.22.5" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then LIBGLIB_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "glib-2.0 >= 2.22.5" 2>&1` else LIBGLIB_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "glib-2.0 >= 2.22.5" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$LIBGLIB_PKG_ERRORS" >&5 as_fn_error $? "Package requirements (glib-2.0 >= 2.22.5) were not met: $LIBGLIB_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. 
Alternatively, you may set the environment variables LIBGLIB_CFLAGS and LIBGLIB_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details." "$LINENO" 5 elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables LIBGLIB_CFLAGS and LIBGLIB_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details" "$LINENO" 5; } else LIBGLIB_CFLAGS=$pkg_cv_LIBGLIB_CFLAGS LIBGLIB_LIBS=$pkg_cv_LIBGLIB_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi want_c_unit="yes" # Check whether --enable-build-c-unit was given. if test "${enable_build_c_unit+set}" = set; then : enableval=$enable_build_c_unit; want_c_unit=${enableval} fi if test "x${want_c_unit}" = "xyes"; then pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBCHECK" >&5 $as_echo_n "checking for LIBCHECK... " >&6; } if test -n "$LIBCHECK_CFLAGS"; then pkg_cv_LIBCHECK_CFLAGS="$LIBCHECK_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"check >= 0.9.8 \""; } >&5 ($PKG_CONFIG --exists --print-errors "check >= 0.9.8 ") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LIBCHECK_CFLAGS=`$PKG_CONFIG --cflags "check >= 0.9.8 " 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$LIBCHECK_LIBS"; then pkg_cv_LIBCHECK_LIBS="$LIBCHECK_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"check >= 0.9.8 \""; } >&5 ($PKG_CONFIG --exists --print-errors "check >= 0.9.8 ") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LIBCHECK_LIBS=`$PKG_CONFIG --libs "check >= 0.9.8 " 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then LIBCHECK_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "check >= 0.9.8 " 2>&1` else LIBCHECK_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "check >= 0.9.8 " 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$LIBCHECK_PKG_ERRORS" >&5 as_fn_error $? "Package requirements (check >= 0.9.8 ) were not met: $LIBCHECK_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables LIBCHECK_CFLAGS and LIBCHECK_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details." "$LINENO" 5 elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "The pkg-config script could not be found or is too old. 
Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables LIBCHECK_CFLAGS and LIBCHECK_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details" "$LINENO" 5; } else LIBCHECK_CFLAGS=$pkg_cv_LIBCHECK_CFLAGS LIBCHECK_LIBS=$pkg_cv_LIBCHECK_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi fi if test "x${want_c_unit}" = "xyes"; then BUILD_C_UNIT_TRUE= BUILD_C_UNIT_FALSE='#' else BUILD_C_UNIT_TRUE='#' BUILD_C_UNIT_FALSE= fi for ac_prog in perl do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PERL+:} false; then : $as_echo_n "(cached) " >&6 else case $PERL in [\\/]* | ?:[\\/]*) ac_cv_path_PERL="$PERL" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PERL="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PERL=$ac_cv_path_PERL if test -n "$PERL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PERL" >&5 $as_echo "$PERL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$PERL" && break done for ac_prog in python2.7 python2.6 python do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_path_PYTHON+:} false; then : $as_echo_n "(cached) " >&6 else case $PYTHON in [\\/]* | ?:[\\/]*) ac_cv_path_PYTHON="$PYTHON" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PYTHON="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PYTHON=$ac_cv_path_PYTHON if test -n "$PYTHON"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PYTHON" >&5 $as_echo "$PYTHON" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$PYTHON" && break done test -n "$PYTHON" || PYTHON="Python is required" { $as_echo "$as_me:${as_lineno-$LINENO}: checking Check for Python major version" >&5 $as_echo_n "checking Check for Python major version... " >&6; } PYTHON_MAJOR_VERSION=`$PYTHON -c "import sys; print(sys.version_info[0])"` case "$PYTHON_MAJOR_VERSION" in 2) ;; *) as_fn_error $? "we need Python version 2.x but found $PYTHON_MAJOR_VERSION.x" "$LINENO" 5 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PYTHON_MAJOR_VERSION" >&5 $as_echo "$PYTHON_MAJOR_VERSION" >&6; } if test -n "$PYTHON"; then # If the user set $PYTHON, use it and don't search something else. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $PYTHON version is >= 2.6" >&5 $as_echo_n "checking whether $PYTHON version is >= 2.6... " >&6; } prog="import sys # split strings by '.' and convert to numeric. Append some zeros # because we need at least 4 digits for the hex conversion. 
# map returns an iterator in Python 3.0 and a list in 2.x minver = list(map(int, '2.6'.split('.'))) + [0, 0, 0] minverhex = 0 # xrange is not present in Python 3.0 and range returns an iterator for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[i] sys.exit(sys.hexversion < minverhex)" if { echo "$as_me:$LINENO: $PYTHON -c "$prog"" >&5 ($PYTHON -c "$prog") >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? "Python interpreter is too old" "$LINENO" 5 fi am_display_PYTHON=$PYTHON else # Otherwise, try each interpreter until we find one that satisfies # VERSION. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a Python interpreter with version >= 2.6" >&5 $as_echo_n "checking for a Python interpreter with version >= 2.6... " >&6; } if ${am_cv_pathless_PYTHON+:} false; then : $as_echo_n "(cached) " >&6 else for am_cv_pathless_PYTHON in python python2 python3 python3.3 python3.2 python3.1 python3.0 python2.7 python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0 none; do test "$am_cv_pathless_PYTHON" = none && break prog="import sys # split strings by '.' and convert to numeric. Append some zeros # because we need at least 4 digits for the hex conversion. # map returns an iterator in Python 3.0 and a list in 2.x minver = list(map(int, '2.6'.split('.'))) + [0, 0, 0] minverhex = 0 # xrange is not present in Python 3.0 and range returns an iterator for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[i] sys.exit(sys.hexversion < minverhex)" if { echo "$as_me:$LINENO: $am_cv_pathless_PYTHON -c "$prog"" >&5 ($am_cv_pathless_PYTHON -c "$prog") >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); }; then : break fi done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_pathless_PYTHON" >&5 $as_echo "$am_cv_pathless_PYTHON" >&6; } # Set $PYTHON to the absolute path of $am_cv_pathless_PYTHON. if test "$am_cv_pathless_PYTHON" = none; then PYTHON=: else # Extract the first word of "$am_cv_pathless_PYTHON", so it can be a program name with args. set dummy $am_cv_pathless_PYTHON; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PYTHON+:} false; then : $as_echo_n "(cached) " >&6 else case $PYTHON in [\\/]* | ?:[\\/]*) ac_cv_path_PYTHON="$PYTHON" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PYTHON="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PYTHON=$ac_cv_path_PYTHON if test -n "$PYTHON"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PYTHON" >&5 $as_echo "$PYTHON" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi am_display_PYTHON=$am_cv_pathless_PYTHON fi if test "$PYTHON" = :; then as_fn_error $? "Python interpreter 2.6 or 2.7 required" "$LINENO" 5 else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $am_display_PYTHON version" >&5 $as_echo_n "checking for $am_display_PYTHON version... 
" >&6; } if ${am_cv_python_version+:} false; then : $as_echo_n "(cached) " >&6 else am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[:3])"` fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_python_version" >&5 $as_echo "$am_cv_python_version" >&6; } PYTHON_VERSION=$am_cv_python_version PYTHON_PREFIX='${prefix}' PYTHON_EXEC_PREFIX='${exec_prefix}' { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $am_display_PYTHON platform" >&5 $as_echo_n "checking for $am_display_PYTHON platform... " >&6; } if ${am_cv_python_platform+:} false; then : $as_echo_n "(cached) " >&6 else am_cv_python_platform=`$PYTHON -c "import sys; sys.stdout.write(sys.platform)"` fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_python_platform" >&5 $as_echo "$am_cv_python_platform" >&6; } PYTHON_PLATFORM=$am_cv_python_platform # Just factor out some code duplication. am_python_setup_sysconfig="\ import sys # Prefer sysconfig over distutils.sysconfig, for better compatibility # with python 3.x. See automake bug#10227. try: import sysconfig except ImportError: can_use_sysconfig = 0 else: can_use_sysconfig = 1 # Can't use sysconfig in CPython 2.7, since it's broken in virtualenvs: # try: from platform import python_implementation if python_implementation() == 'CPython' and sys.version[:3] == '2.7': can_use_sysconfig = 0 except ImportError: pass" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $am_display_PYTHON script directory" >&5 $as_echo_n "checking for $am_display_PYTHON script directory... 
" >&6; } if ${am_cv_python_pythondir+:} false; then : $as_echo_n "(cached) " >&6 else if test "x$prefix" = xNONE then am_py_prefix=$ac_default_prefix else am_py_prefix=$prefix fi am_cv_python_pythondir=`$PYTHON -c " $am_python_setup_sysconfig if can_use_sysconfig: sitedir = sysconfig.get_path('purelib', vars={'base':'$am_py_prefix'}) else: from distutils import sysconfig sitedir = sysconfig.get_python_lib(0, 0, prefix='$am_py_prefix') sys.stdout.write(sitedir)"` case $am_cv_python_pythondir in $am_py_prefix*) am__strip_prefix=`echo "$am_py_prefix" | sed 's|.|.|g'` am_cv_python_pythondir=`echo "$am_cv_python_pythondir" | sed "s,^$am__strip_prefix,$PYTHON_PREFIX,"` ;; *) case $am_py_prefix in /usr|/System*) ;; *) am_cv_python_pythondir=$PYTHON_PREFIX/lib/python$PYTHON_VERSION/site-packages ;; esac ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_python_pythondir" >&5 $as_echo "$am_cv_python_pythondir" >&6; } pythondir=$am_cv_python_pythondir pkgpythondir=\${pythondir}/$PACKAGE { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $am_display_PYTHON extension module directory" >&5 $as_echo_n "checking for $am_display_PYTHON extension module directory... 
" >&6; } if ${am_cv_python_pyexecdir+:} false; then : $as_echo_n "(cached) " >&6 else if test "x$exec_prefix" = xNONE then am_py_exec_prefix=$am_py_prefix else am_py_exec_prefix=$exec_prefix fi am_cv_python_pyexecdir=`$PYTHON -c " $am_python_setup_sysconfig if can_use_sysconfig: sitedir = sysconfig.get_path('platlib', vars={'platbase':'$am_py_prefix'}) else: from distutils import sysconfig sitedir = sysconfig.get_python_lib(1, 0, prefix='$am_py_prefix') sys.stdout.write(sitedir)"` case $am_cv_python_pyexecdir in $am_py_exec_prefix*) am__strip_prefix=`echo "$am_py_exec_prefix" | sed 's|.|.|g'` am_cv_python_pyexecdir=`echo "$am_cv_python_pyexecdir" | sed "s,^$am__strip_prefix,$PYTHON_EXEC_PREFIX,"` ;; *) case $am_py_exec_prefix in /usr|/System*) ;; *) am_cv_python_pyexecdir=$PYTHON_EXEC_PREFIX/lib/python$PYTHON_VERSION/site-packages ;; esac ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_python_pyexecdir" >&5 $as_echo "$am_cv_python_pyexecdir" >&6; } pyexecdir=$am_cv_python_pyexecdir pkgpyexecdir=\${pyexecdir}/$PACKAGE fi if test -z $PYTHON; then PYTHON="python" fi PYTHON_NAME=`basename $PYTHON` { $as_echo "$as_me:${as_lineno-$LINENO}: checking $PYTHON_NAME module: pywbem" >&5 $as_echo_n "checking $PYTHON_NAME module: pywbem... " >&6; } $PYTHON -c "import pywbem" 2>/dev/null if test $? -eq 0; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } eval HAVE_PYMOD_PYWBEM=yes else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } eval HAVE_PYMOD_PYWBEM=no # if test -n "Required" then as_fn_error $? "failed to find required module pywbem" "$LINENO" 5 exit 1 fi fi if test -z $PYTHON; then PYTHON="python" fi PYTHON_NAME=`basename $PYTHON` { $as_echo "$as_me:${as_lineno-$LINENO}: checking $PYTHON_NAME module: M2Crypto" >&5 $as_echo_n "checking $PYTHON_NAME module: M2Crypto... " >&6; } $PYTHON -c "import M2Crypto" 2>/dev/null if test $? 
-eq 0; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } eval HAVE_PYMOD_M2CRYPTO=yes else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } eval HAVE_PYMOD_M2CRYPTO=no # if test -n "Required" then as_fn_error $? "failed to find required module M2Crypto" "$LINENO" 5 exit 1 fi fi if test -z $PYTHON; then PYTHON="python" fi PYTHON_NAME=`basename $PYTHON` { $as_echo "$as_me:${as_lineno-$LINENO}: checking $PYTHON_NAME module: argparse" >&5 $as_echo_n "checking $PYTHON_NAME module: argparse... " >&6; } $PYTHON -c "import argparse" 2>/dev/null if test $? -eq 0; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } eval HAVE_PYMOD_ARGPARSE=yes else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } eval HAVE_PYMOD_ARGPARSE=no # if test -n "Required" then as_fn_error $? "failed to find required module argparse" "$LINENO" 5 exit 1 fi fi # Check whether --with-rest-api was given. if test "${with_rest_api+set}" = set; then : withval=$with_rest_api; else with_rest_api=yes fi if test "x$with_rest_api" = "xyes"; then pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBMICROHTTPD" >&5 $as_echo_n "checking for LIBMICROHTTPD... " >&6; } if test -n "$LIBMICROHTTPD_CFLAGS"; then pkg_cv_LIBMICROHTTPD_CFLAGS="$LIBMICROHTTPD_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libmicrohttpd >= 0.9\""; } >&5 ($PKG_CONFIG --exists --print-errors "libmicrohttpd >= 0.9") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LIBMICROHTTPD_CFLAGS=`$PKG_CONFIG --cflags "libmicrohttpd >= 0.9" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$LIBMICROHTTPD_LIBS"; then pkg_cv_LIBMICROHTTPD_LIBS="$LIBMICROHTTPD_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libmicrohttpd >= 0.9\""; } >&5 ($PKG_CONFIG --exists --print-errors "libmicrohttpd >= 0.9") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LIBMICROHTTPD_LIBS=`$PKG_CONFIG --libs "libmicrohttpd >= 0.9" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then LIBMICROHTTPD_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libmicrohttpd >= 0.9" 2>&1` else LIBMICROHTTPD_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libmicrohttpd >= 0.9" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$LIBMICROHTTPD_PKG_ERRORS" >&5 as_fn_error $? "Package requirements (libmicrohttpd >= 0.9) were not met: $LIBMICROHTTPD_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. Alternatively, you may set the environment variables LIBMICROHTTPD_CFLAGS and LIBMICROHTTPD_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details." "$LINENO" 5 elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "The pkg-config script could not be found or is too old. 
Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. Alternatively, you may set the environment variables LIBMICROHTTPD_CFLAGS and LIBMICROHTTPD_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details. To get pkg-config, see . See \`config.log' for more details" "$LINENO" 5; } else LIBMICROHTTPD_CFLAGS=$pkg_cv_LIBMICROHTTPD_CFLAGS LIBMICROHTTPD_LIBS=$pkg_cv_LIBMICROHTTPD_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for JSON" >&5 $as_echo_n "checking for JSON... " >&6; } if test -n "$JSON_CFLAGS"; then pkg_cv_JSON_CFLAGS="$JSON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"json >= 0.10\""; } >&5 ($PKG_CONFIG --exists --print-errors "json >= 0.10") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_JSON_CFLAGS=`$PKG_CONFIG --cflags "json >= 0.10" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$JSON_LIBS"; then pkg_cv_JSON_LIBS="$JSON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"json >= 0.10\""; } >&5 ($PKG_CONFIG --exists --print-errors "json >= 0.10") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_JSON_LIBS=`$PKG_CONFIG --libs "json >= 0.10" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then JSON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "json >= 0.10" 2>&1` else JSON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "json >= 0.10" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$JSON_PKG_ERRORS" >&5 pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for JSON" >&5 $as_echo_n "checking for JSON... " >&6; } if test -n "$JSON_CFLAGS"; then pkg_cv_JSON_CFLAGS="$JSON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"json-c\""; } >&5 ($PKG_CONFIG --exists --print-errors "json-c") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_JSON_CFLAGS=`$PKG_CONFIG --cflags "json-c" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$JSON_LIBS"; then pkg_cv_JSON_LIBS="$JSON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"json-c\""; } >&5 ($PKG_CONFIG --exists --print-errors "json-c") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_JSON_LIBS=`$PKG_CONFIG --libs "json-c" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then JSON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "json-c" 2>&1` else JSON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "json-c" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$JSON_PKG_ERRORS" >&5 as_fn_error $? "json-c development libraries 0.10 or later required" "$LINENO" 5 elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? "json-c development libraries 0.10 or later required" "$LINENO" 5 else JSON_CFLAGS=$pkg_cv_JSON_CFLAGS JSON_LIBS=$pkg_cv_JSON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for JSON" >&5 $as_echo_n "checking for JSON... " >&6; } if test -n "$JSON_CFLAGS"; then pkg_cv_JSON_CFLAGS="$JSON_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"json-c\""; } >&5 ($PKG_CONFIG --exists --print-errors "json-c") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_JSON_CFLAGS=`$PKG_CONFIG --cflags "json-c" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$JSON_LIBS"; then pkg_cv_JSON_LIBS="$JSON_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"json-c\""; } >&5 ($PKG_CONFIG --exists --print-errors "json-c") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_JSON_LIBS=`$PKG_CONFIG --libs "json-c" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then JSON_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "json-c" 2>&1` else JSON_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "json-c" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$JSON_PKG_ERRORS" >&5 as_fn_error $? "json-c development libraries 0.10 or later required" "$LINENO" 5 elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? "json-c development libraries 0.10 or later required" "$LINENO" 5 else JSON_CFLAGS=$pkg_cv_JSON_CFLAGS JSON_LIBS=$pkg_cv_JSON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi else JSON_CFLAGS=$pkg_cv_JSON_CFLAGS JSON_LIBS=$pkg_cv_JSON_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi fi if test "x$with_rest_api" = "xyes"; then WITH_REST_API_TRUE= WITH_REST_API_FALSE='#' else WITH_REST_API_TRUE='#' WITH_REST_API_FALSE= fi # Check whether --with-megaraid was given. 
if test "${with_megaraid+set}" = set; then : withval=$with_megaraid; else with_megaraid=yes fi if test "x$with_megaraid" = "xyes"; then WITH_MEGARAID_TRUE= WITH_MEGARAID_FALSE='#' else WITH_MEGARAID_TRUE='#' WITH_MEGARAID_FALSE= fi # Check whether --with-hpsa was given. if test "${with_hpsa+set}" = set; then : withval=$with_hpsa; else with_hpsa=yes fi if test "x$with_hpsa" = "xyes"; then WITH_HPSA_TRUE= WITH_HPSA_FALSE='#' else WITH_HPSA_TRUE='#' WITH_HPSA_FALSE= fi pkg_failed=no { $as_echo "$as_me:${as_lineno-$LINENO}: checking for LIBCONFIG" >&5 $as_echo_n "checking for LIBCONFIG... " >&6; } if test -n "$LIBCONFIG_CFLAGS"; then pkg_cv_LIBCONFIG_CFLAGS="$LIBCONFIG_CFLAGS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libconfig >= 1.3.2\""; } >&5 ($PKG_CONFIG --exists --print-errors "libconfig >= 1.3.2") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LIBCONFIG_CFLAGS=`$PKG_CONFIG --cflags "libconfig >= 1.3.2" 2>/dev/null` test "x$?" != "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test -n "$LIBCONFIG_LIBS"; then pkg_cv_LIBCONFIG_LIBS="$LIBCONFIG_LIBS" elif test -n "$PKG_CONFIG"; then if test -n "$PKG_CONFIG" && \ { { $as_echo "$as_me:${as_lineno-$LINENO}: \$PKG_CONFIG --exists --print-errors \"libconfig >= 1.3.2\""; } >&5 ($PKG_CONFIG --exists --print-errors "libconfig >= 1.3.2") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then pkg_cv_LIBCONFIG_LIBS=`$PKG_CONFIG --libs "libconfig >= 1.3.2" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes else pkg_failed=yes fi else pkg_failed=untried fi if test $pkg_failed = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi if test $_pkg_short_errors_supported = yes; then LIBCONFIG_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "libconfig >= 1.3.2" 2>&1` else LIBCONFIG_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "libconfig >= 1.3.2" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$LIBCONFIG_PKG_ERRORS" >&5 as_fn_error $? "libconfig 1.3.2 or newer not found." "$LINENO" 5 elif test $pkg_failed = untried; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } as_fn_error $? "libconfig 1.3.2 or newer not found." "$LINENO" 5 else LIBCONFIG_CFLAGS=$pkg_cv_LIBCONFIG_CFLAGS LIBCONFIG_LIBS=$pkg_cv_LIBCONFIG_LIBS { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi # Check whether --with-bash-completion was given. if test "${with_bash_completion+set}" = set; then : withval=$with_bash_completion; else with_bash_completion=yes fi if test "x$with_bash_completion" = "xyes"; then WITH_BASH_COMPLETION_TRUE= WITH_BASH_COMPLETION_FALSE='#' else WITH_BASH_COMPLETION_TRUE='#' WITH_BASH_COMPLETION_FALSE= fi # Check whether --with-bash-completion-dir was given. if test "${with_bash_completion_dir+set}" = set; then : withval=$with_bash_completion_dir; else if $($PKG_CONFIG --exists bash-completion); then : with_bash_completion_dir=$( $PKG_CONFIG --variable=completionsdir bash-completion) else # EPEL 6 is still shipping bash-completion version 1.x # which does not provide pkg-config support. 
# So, for EPEL 6 or anyone not installed bash-compeltion, we # use fallback folder '/etc/bash_completion.d' with_bash_completion_dir=$sysconfdir/bash_completion.d fi fi bashcompletiondir=$with_bash_completion_dir #Setup the unit directory for systemd stuff if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}pkg-config", so it can be a program name with args. set dummy ${ac_tool_prefix}pkg-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PKG_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PKG_CONFIG=$ac_cv_path_PKG_CONFIG if test -n "$PKG_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5 $as_echo "$PKG_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_path_PKG_CONFIG"; then ac_pt_PKG_CONFIG=$PKG_CONFIG # Extract the first word of "pkg-config", so it can be a program name with args. set dummy pkg-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_ac_pt_PKG_CONFIG+:} false; then : $as_echo_n "(cached) " >&6 else case $ac_pt_PKG_CONFIG in [\\/]* | ?:[\\/]*) ac_cv_path_ac_pt_PKG_CONFIG="$ac_pt_PKG_CONFIG" # Let the user override the test with a path. 
;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_ac_pt_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi ac_pt_PKG_CONFIG=$ac_cv_path_ac_pt_PKG_CONFIG if test -n "$ac_pt_PKG_CONFIG"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_PKG_CONFIG" >&5 $as_echo "$ac_pt_PKG_CONFIG" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_pt_PKG_CONFIG" = x; then PKG_CONFIG="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac PKG_CONFIG=$ac_pt_PKG_CONFIG fi else PKG_CONFIG="$ac_cv_path_PKG_CONFIG" fi fi if test -n "$PKG_CONFIG"; then _pkg_min_version=0.9.0 { $as_echo "$as_me:${as_lineno-$LINENO}: checking pkg-config is at least version $_pkg_min_version" >&5 $as_echo_n "checking pkg-config is at least version $_pkg_min_version... " >&6; } if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } PKG_CONFIG="" fi fi # Check whether --with-systemdsystemunitdir was given. 
if test "${with_systemdsystemunitdir+set}" = set; then : withval=$with_systemdsystemunitdir; else with_systemdsystemunitdir=$($PKG_CONFIG --variable=systemdsystemunitdir systemd) fi if test "x$with_systemdsystemunitdir" != xno; then systemdsystemunitdir=$with_systemdsystemunitdir fi if test -n "$with_systemdsystemunitdir" -a "x$with_systemdsystemunitdir" != xno ; then HAVE_SYSTEMD_TRUE= HAVE_SYSTEMD_FALSE='#' else HAVE_SYSTEMD_TRUE='#' HAVE_SYSTEMD_FALSE= fi ac_config_files="$ac_config_files libstoragemgmt.pc Makefile c_binding/Makefile c_binding/include/Makefile c_binding/include/libstoragemgmt/Makefile c_binding/include/libstoragemgmt/libstoragemgmt_version.h python_binding/Makefile python_binding/lsm/version.py plugin/Makefile plugin/simc/Makefile plugin/megaraid/Makefile plugin/hpsa/Makefile daemon/Makefile config/Makefile doc/Makefile doc/man/lsmcli.1 doc/man/lsmd.1 doc/man/sim_lsmplugin.1 doc/man/simc_lsmplugin.1 doc/man/smispy_lsmplugin.1 doc/man/ontap_lsmplugin.1 doc/man/targetd_lsmplugin.1 doc/man/nstor_lsmplugin.1 doc/doxygen.conf doc/man/lsmd.conf.5 doc/man/megaraid_lsmplugin.1 doc/man/hpsa_lsmplugin.1 tools/Makefile tools/udev/Makefile tools/lsmcli/Makefile tools/utility/Makefile tools/bash_completion/Makefile packaging/Makefile packaging/daemon/Makefile packaging/libstoragemgmt.spec doc/man/Makefile test/Makefile" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. 
_ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! 
-f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs { $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 $as_echo_n "checking that generated files are newer than configure... " >&6; } if test -n "$am_sleep_pid"; then # Hide warnings about reused PIDs. wait $am_sleep_pid 2>/dev/null fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5 $as_echo "done" >&6; } if test -n "$EXEEXT"; then am__EXEEXT_TRUE= am__EXEEXT_FALSE='#' else am__EXEEXT_TRUE='#' am__EXEEXT_FALSE= fi if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then as_fn_error $? "conditional \"AMDEP\" was never defined. Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 fi if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then as_fn_error $? "conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then as_fn_error $? "conditional \"am__fastdepCXX\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then as_fn_error $? "conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${BUILD_C_UNIT_TRUE}" && test -z "${BUILD_C_UNIT_FALSE}"; then as_fn_error $? "conditional \"BUILD_C_UNIT\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${WITH_REST_API_TRUE}" && test -z "${WITH_REST_API_FALSE}"; then as_fn_error $? "conditional \"WITH_REST_API\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${WITH_MEGARAID_TRUE}" && test -z "${WITH_MEGARAID_FALSE}"; then as_fn_error $? "conditional \"WITH_MEGARAID\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${WITH_HPSA_TRUE}" && test -z "${WITH_HPSA_FALSE}"; then as_fn_error $? "conditional \"WITH_HPSA\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${WITH_BASH_COMPLETION_TRUE}" && test -z "${WITH_BASH_COMPLETION_FALSE}"; then as_fn_error $? "conditional \"WITH_BASH_COMPLETION\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${HAVE_SYSTEMD_TRUE}" && test -z "${HAVE_SYSTEMD_FALSE}"; then as_fn_error $? "conditional \"HAVE_SYSTEMD\" was never defined. Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 fi : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. 
if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. 
in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... 
# ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. 
ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. 
## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by libstoragemgmt $as_me 1.2.3, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to . libstoragemgmt home page: ." 
_ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ libstoragemgmt config.status 1.2.3 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' MKDIR_P='$MKDIR_P' AWK='$AWK' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? 
"ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. 
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH sed_quote_subst='$sed_quote_subst' double_quote_subst='$double_quote_subst' delay_variable_subst='$delay_variable_subst' macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED 
"$delay_single_quote_subst"`' lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED 
"$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' 
enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' 
always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`' runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`' soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' 
sys_lib_dlsearch_path_spec='`$ECHO "$sys_lib_dlsearch_path_spec" | $SED "$delay_single_quote_subst"`' hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`' predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`' postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`' predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`' postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`' compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`' LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`' reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`' reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`' old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' lt_cv_prog_compiler_c_o_CXX='`$ECHO 
"$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`' export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`' old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`' old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`' archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`' module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`' allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`' hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`' hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`' hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`' hardcode_shlibpath_var_CXX='`$ECHO 
"$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`' hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`' postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`' predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`' postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`' compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`' LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' compiler='$compiler_DEFAULT' # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF \$1 _LTECHO_EOF' } # Quote evaled strings. 
for var in SHELL \ ECHO \ PATH_SEPARATOR \ SED \ GREP \ EGREP \ FGREP \ LD \ NM \ LN_S \ lt_SP2NL \ lt_NL2SP \ reload_flag \ OBJDUMP \ deplibs_check_method \ file_magic_cmd \ file_magic_glob \ want_nocaseglob \ DLLTOOL \ sharedlib_from_linklib_cmd \ AR \ AR_FLAGS \ archiver_list_spec \ STRIP \ RANLIB \ CC \ CFLAGS \ compiler \ lt_cv_sys_global_symbol_pipe \ lt_cv_sys_global_symbol_to_cdecl \ lt_cv_sys_global_symbol_to_c_name_address \ lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ nm_file_list_spec \ lt_prog_compiler_no_builtin_flag \ lt_prog_compiler_pic \ lt_prog_compiler_wl \ lt_prog_compiler_static \ lt_cv_prog_compiler_c_o \ need_locks \ MANIFEST_TOOL \ DSYMUTIL \ NMEDIT \ LIPO \ OTOOL \ OTOOL64 \ shrext_cmds \ export_dynamic_flag_spec \ whole_archive_flag_spec \ compiler_needs_object \ with_gnu_ld \ allow_undefined_flag \ no_undefined_flag \ hardcode_libdir_flag_spec \ hardcode_libdir_separator \ exclude_expsyms \ include_expsyms \ file_list_spec \ variables_saved_for_relink \ libname_spec \ library_names_spec \ soname_spec \ install_override_mode \ finish_eval \ old_striplib \ striplib \ compiler_lib_search_dirs \ predep_objects \ postdep_objects \ predeps \ postdeps \ compiler_lib_search_path \ LD_CXX \ reload_flag_CXX \ compiler_CXX \ lt_prog_compiler_no_builtin_flag_CXX \ lt_prog_compiler_pic_CXX \ lt_prog_compiler_wl_CXX \ lt_prog_compiler_static_CXX \ lt_cv_prog_compiler_c_o_CXX \ export_dynamic_flag_spec_CXX \ whole_archive_flag_spec_CXX \ compiler_needs_object_CXX \ with_gnu_ld_CXX \ allow_undefined_flag_CXX \ no_undefined_flag_CXX \ hardcode_libdir_flag_spec_CXX \ hardcode_libdir_separator_CXX \ exclude_expsyms_CXX \ include_expsyms_CXX \ file_list_spec_CXX \ compiler_lib_search_dirs_CXX \ predep_objects_CXX \ postdep_objects_CXX \ predeps_CXX \ postdeps_CXX \ compiler_lib_search_path_CXX; do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED 
\\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Double-quote double-evaled strings. for var in reload_cmds \ old_postinstall_cmds \ old_postuninstall_cmds \ old_archive_cmds \ extract_expsyms_cmds \ old_archive_from_new_cmds \ old_archive_from_expsyms_cmds \ archive_cmds \ archive_expsym_cmds \ module_cmds \ module_expsym_cmds \ export_symbols_cmds \ prelink_cmds \ postlink_cmds \ postinstall_cmds \ postuninstall_cmds \ finish_cmds \ sys_lib_search_path_spec \ sys_lib_dlsearch_path_spec \ reload_cmds_CXX \ old_archive_cmds_CXX \ old_archive_from_new_cmds_CXX \ old_archive_from_expsyms_cmds_CXX \ archive_cmds_CXX \ archive_expsym_cmds_CXX \ module_cmds_CXX \ module_expsym_cmds_CXX \ export_symbols_cmds_CXX \ prelink_cmds_CXX \ postlink_cmds_CXX; do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done ac_aux_dir='$ac_aux_dir' xsi_shell='$xsi_shell' lt_shell_append='$lt_shell_append' # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes INIT. if test -n "\${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi PACKAGE='$PACKAGE' VERSION='$VERSION' TIMESTAMP='$TIMESTAMP' RM='$RM' ofile='$ofile' _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. 
for ac_config_target in $ac_config_targets do case $ac_config_target in "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; "libstoragemgmt.pc") CONFIG_FILES="$CONFIG_FILES libstoragemgmt.pc" ;; "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "c_binding/Makefile") CONFIG_FILES="$CONFIG_FILES c_binding/Makefile" ;; "c_binding/include/Makefile") CONFIG_FILES="$CONFIG_FILES c_binding/include/Makefile" ;; "c_binding/include/libstoragemgmt/Makefile") CONFIG_FILES="$CONFIG_FILES c_binding/include/libstoragemgmt/Makefile" ;; "c_binding/include/libstoragemgmt/libstoragemgmt_version.h") CONFIG_FILES="$CONFIG_FILES c_binding/include/libstoragemgmt/libstoragemgmt_version.h" ;; "python_binding/Makefile") CONFIG_FILES="$CONFIG_FILES python_binding/Makefile" ;; "python_binding/lsm/version.py") CONFIG_FILES="$CONFIG_FILES python_binding/lsm/version.py" ;; "plugin/Makefile") CONFIG_FILES="$CONFIG_FILES plugin/Makefile" ;; "plugin/simc/Makefile") CONFIG_FILES="$CONFIG_FILES plugin/simc/Makefile" ;; "plugin/megaraid/Makefile") CONFIG_FILES="$CONFIG_FILES plugin/megaraid/Makefile" ;; "plugin/hpsa/Makefile") CONFIG_FILES="$CONFIG_FILES plugin/hpsa/Makefile" ;; "daemon/Makefile") CONFIG_FILES="$CONFIG_FILES daemon/Makefile" ;; "config/Makefile") CONFIG_FILES="$CONFIG_FILES config/Makefile" ;; "doc/Makefile") CONFIG_FILES="$CONFIG_FILES doc/Makefile" ;; "doc/man/lsmcli.1") CONFIG_FILES="$CONFIG_FILES doc/man/lsmcli.1" ;; "doc/man/lsmd.1") CONFIG_FILES="$CONFIG_FILES doc/man/lsmd.1" ;; "doc/man/sim_lsmplugin.1") CONFIG_FILES="$CONFIG_FILES doc/man/sim_lsmplugin.1" ;; "doc/man/simc_lsmplugin.1") CONFIG_FILES="$CONFIG_FILES doc/man/simc_lsmplugin.1" ;; "doc/man/smispy_lsmplugin.1") CONFIG_FILES="$CONFIG_FILES doc/man/smispy_lsmplugin.1" ;; "doc/man/ontap_lsmplugin.1") CONFIG_FILES="$CONFIG_FILES doc/man/ontap_lsmplugin.1" ;; "doc/man/targetd_lsmplugin.1") 
CONFIG_FILES="$CONFIG_FILES doc/man/targetd_lsmplugin.1" ;; "doc/man/nstor_lsmplugin.1") CONFIG_FILES="$CONFIG_FILES doc/man/nstor_lsmplugin.1" ;; "doc/doxygen.conf") CONFIG_FILES="$CONFIG_FILES doc/doxygen.conf" ;; "doc/man/lsmd.conf.5") CONFIG_FILES="$CONFIG_FILES doc/man/lsmd.conf.5" ;; "doc/man/megaraid_lsmplugin.1") CONFIG_FILES="$CONFIG_FILES doc/man/megaraid_lsmplugin.1" ;; "doc/man/hpsa_lsmplugin.1") CONFIG_FILES="$CONFIG_FILES doc/man/hpsa_lsmplugin.1" ;; "tools/Makefile") CONFIG_FILES="$CONFIG_FILES tools/Makefile" ;; "tools/udev/Makefile") CONFIG_FILES="$CONFIG_FILES tools/udev/Makefile" ;; "tools/lsmcli/Makefile") CONFIG_FILES="$CONFIG_FILES tools/lsmcli/Makefile" ;; "tools/utility/Makefile") CONFIG_FILES="$CONFIG_FILES tools/utility/Makefile" ;; "tools/bash_completion/Makefile") CONFIG_FILES="$CONFIG_FILES tools/bash_completion/Makefile" ;; "packaging/Makefile") CONFIG_FILES="$CONFIG_FILES packaging/Makefile" ;; "packaging/daemon/Makefile") CONFIG_FILES="$CONFIG_FILES packaging/daemon/Makefile" ;; "packaging/libstoragemgmt.spec") CONFIG_FILES="$CONFIG_FILES packaging/libstoragemgmt.spec" ;; "doc/man/Makefile") CONFIG_FILES="$CONFIG_FILES doc/man/Makefile" ;; "test/Makefile") CONFIG_FILES="$CONFIG_FILES test/Makefile" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. 
Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? 
"could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). 
if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_tt=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. 
ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? 
"invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac ac_MKDIR_P=$MKDIR_P case $MKDIR_P in [\\/$]* | ?:[\\/]* ) ;; */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. 
ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t s&@MKDIR_P@&$ac_MKDIR_P&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? "could not create -" "$LINENO" 5 fi # Compute "$ac_file"'s index in $config_headers. _am_arg="$ac_file" _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || $as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$_am_arg" : 'X\(//\)[^/]' \| \ X"$_am_arg" : 'X\(//\)$' \| \ X"$_am_arg" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X"$_am_arg" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'`/stamp-h$_am_stamp_count ;; :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "depfiles":C) test x"$AMDEP_TRUE" != x"" || { # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. # We used to match only the files named 'Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`$as_dirname -- "$mf" || $as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$mf" : 'X\(//\)[^/]' \| \ X"$mf" : 'X\(//\)$' \| \ X"$mf" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$mf" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running 'make'. 
DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "$am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`$as_dirname -- "$file" || $as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$file" : 'X\(//\)[^/]' \| \ X"$file" : 'X\(//\)$' \| \ X"$file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir=$dirpart/$fdir; as_fn_mkdir_p # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ;; "libtool":C) # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes. if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi cfgfile="${ofile}T" trap "$RM \"$cfgfile\"; exit 1" 1 2 15 $RM "$cfgfile" cat <<_LT_EOF >> "$cfgfile" #! $SHELL # `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. # Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION # Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: # NOTE: Changes made to this file will be lost: look at ltmain.sh. # # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, # 2006, 2007, 2008, 2009, 2010, 2011 Free Software # Foundation, Inc. # Written by Gordon Matzigkeit, 1996 # # This file is part of GNU Libtool. 
# # GNU Libtool is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. # # GNU Libtool is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Libtool; see the file COPYING. If not, a copy # can be downloaded from http://www.gnu.org/licenses/gpl.html, or # obtained by writing to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # The names of the tagged configurations supported by this script. available_tags="CXX " # ### BEGIN LIBTOOL CONFIG # Which release of libtool.m4 was used? macro_version=$macro_version macro_revision=$macro_revision # Whether or not to build shared libraries. build_libtool_libs=$enable_shared # Whether or not to build static libraries. build_old_libs=$enable_static # What type of objects to build. pic_mode=$pic_mode # Whether or not to optimize for fast installation. fast_install=$enable_fast_install # Shell to use when invoking shell scripts. SHELL=$lt_SHELL # An echo program that protects backslashes. ECHO=$lt_ECHO # The PATH separator for the build system. PATH_SEPARATOR=$lt_PATH_SEPARATOR # The host system. host_alias=$host_alias host=$host host_os=$host_os # The build system. build_alias=$build_alias build=$build build_os=$build_os # A sed program that does not truncate output. 
SED=$lt_SED # Sed that helps us avoid accidentally triggering echo(1) options like -n. Xsed="\$SED -e 1s/^X//" # A grep program that handles long lines. GREP=$lt_GREP # An ERE matcher. EGREP=$lt_EGREP # A literal string matcher. FGREP=$lt_FGREP # A BSD- or MS-compatible name lister. NM=$lt_NM # Whether we need soft or hard links. LN_S=$lt_LN_S # What is the maximum length of a command? max_cmd_len=$max_cmd_len # Object file suffix (normally "o"). objext=$ac_objext # Executable file suffix (normally ""). exeext=$exeext # whether the shell understands "unset". lt_unset=$lt_unset # turn spaces into newlines. SP2NL=$lt_lt_SP2NL # turn newlines into spaces. NL2SP=$lt_lt_NL2SP # convert \$build file names to \$host format. to_host_file_cmd=$lt_cv_to_host_file_cmd # convert \$build files to toolchain format. to_tool_file_cmd=$lt_cv_to_tool_file_cmd # An object symbol dumper. OBJDUMP=$lt_OBJDUMP # Method to check whether dependent libraries are shared objects. deplibs_check_method=$lt_deplibs_check_method # Command to use when deplibs_check_method = "file_magic". file_magic_cmd=$lt_file_magic_cmd # How to find potential files when deplibs_check_method = "file_magic". file_magic_glob=$lt_file_magic_glob # Find potential files using nocaseglob when deplibs_check_method = "file_magic". want_nocaseglob=$lt_want_nocaseglob # DLL creation program. DLLTOOL=$lt_DLLTOOL # Command to associate shared and link libraries. sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd # The archiver. AR=$lt_AR # Flags to create an archive. AR_FLAGS=$lt_AR_FLAGS # How to feed a file listing to the archiver. archiver_list_spec=$lt_archiver_list_spec # A symbol stripping program. STRIP=$lt_STRIP # Commands used to install an old-style archive. RANLIB=$lt_RANLIB old_postinstall_cmds=$lt_old_postinstall_cmds old_postuninstall_cmds=$lt_old_postuninstall_cmds # Whether to use a lock for old archive extraction. lock_old_archive_extraction=$lock_old_archive_extraction # A C compiler. 
LTCC=$lt_CC # LTCC compiler flags. LTCFLAGS=$lt_CFLAGS # Take the output of nm and produce a listing of raw symbols and C names. global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe # Transform the output of nm in a proper C declaration. global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl # Transform the output of nm in a C name address pair. global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address # Transform the output of nm in a C name address pair when lib prefix is needed. global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix # Specify filename containing input files for \$NM. nm_file_list_spec=$lt_nm_file_list_spec # The root where to search for dependent libraries,and in which our libraries should be installed. lt_sysroot=$lt_sysroot # The name of the directory that contains temporary libtool files. objdir=$objdir # Used to examine libraries when file_magic_cmd begins with "file". MAGIC_CMD=$MAGIC_CMD # Must we lock files when doing compilation? need_locks=$lt_need_locks # Manifest tool. MANIFEST_TOOL=$lt_MANIFEST_TOOL # Tool to manipulate archived DWARF debug symbol files on Mac OS X. DSYMUTIL=$lt_DSYMUTIL # Tool to change global to local symbols on Mac OS X. NMEDIT=$lt_NMEDIT # Tool to manipulate fat objects and archives on Mac OS X. LIPO=$lt_LIPO # ldd/readelf like tool for Mach-O binaries on Mac OS X. OTOOL=$lt_OTOOL # ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. OTOOL64=$lt_OTOOL64 # Old archive suffix (normally "a"). libext=$libext # Shared library suffix (normally ".so"). shrext_cmds=$lt_shrext_cmds # The commands to extract the exported symbol list from a shared archive. extract_expsyms_cmds=$lt_extract_expsyms_cmds # Variables whose values should be saved in libtool wrapper scripts and # restored at link time. variables_saved_for_relink=$lt_variables_saved_for_relink # Do we need the "lib" prefix for modules? 
need_lib_prefix=$need_lib_prefix # Do we need a version for libraries? need_version=$need_version # Library versioning type. version_type=$version_type # Shared library runtime path variable. runpath_var=$runpath_var # Shared library path variable. shlibpath_var=$shlibpath_var # Is shlibpath searched before the hard-coded library search path? shlibpath_overrides_runpath=$shlibpath_overrides_runpath # Format of library name prefix. libname_spec=$lt_libname_spec # List of archive names. First name is the real one, the rest are links. # The last name is the one that the linker finds with -lNAME library_names_spec=$lt_library_names_spec # The coded name of the library, if different from the real name. soname_spec=$lt_soname_spec # Permission mode override for installation of shared libraries. install_override_mode=$lt_install_override_mode # Command to use after installation of a shared archive. postinstall_cmds=$lt_postinstall_cmds # Command to use after uninstallation of a shared archive. postuninstall_cmds=$lt_postuninstall_cmds # Commands used to finish a libtool library installation in a directory. finish_cmds=$lt_finish_cmds # As "finish_cmds", except a single script fragment to be evaled but # not shown. finish_eval=$lt_finish_eval # Whether we should hardcode library paths into libraries. hardcode_into_libs=$hardcode_into_libs # Compile-time system search path for libraries. sys_lib_search_path_spec=$lt_sys_lib_search_path_spec # Run-time system search path for libraries. sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec # Whether dlopen is supported. dlopen_support=$enable_dlopen # Whether dlopen of programs is supported. dlopen_self=$enable_dlopen_self # Whether dlopen of statically linked programs is supported. dlopen_self_static=$enable_dlopen_self_static # Commands to strip libraries. old_striplib=$lt_old_striplib striplib=$lt_striplib # The linker used to build libraries. LD=$lt_LD # How to create reloadable object files. 
reload_flag=$lt_reload_flag reload_cmds=$lt_reload_cmds # Commands used to build an old-style archive. old_archive_cmds=$lt_old_archive_cmds # A language specific compiler. CC=$lt_compiler # Is the compiler the GNU compiler? with_gcc=$GCC # Compiler flag to turn off builtin functions. no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag # Additional compiler flags for building library objects. pic_flag=$lt_lt_prog_compiler_pic # How to pass a linker flag through the compiler. wl=$lt_lt_prog_compiler_wl # Compiler flag to prevent dynamic linking. link_static_flag=$lt_lt_prog_compiler_static # Does compiler simultaneously support -c and -o options? compiler_c_o=$lt_lt_cv_prog_compiler_c_o # Whether or not to add -lc for building shared libraries. build_libtool_need_lc=$archive_cmds_need_lc # Whether or not to disallow shared libs when runtime libs are static. allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes # Compiler flag to allow reflexive dlopens. export_dynamic_flag_spec=$lt_export_dynamic_flag_spec # Compiler flag to generate shared objects directly from archives. whole_archive_flag_spec=$lt_whole_archive_flag_spec # Whether the compiler copes with passing no objects directly. compiler_needs_object=$lt_compiler_needs_object # Create an old-style archive from a shared archive. old_archive_from_new_cmds=$lt_old_archive_from_new_cmds # Create a temporary old-style archive to link instead of a shared archive. old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds # Commands used to build a shared archive. archive_cmds=$lt_archive_cmds archive_expsym_cmds=$lt_archive_expsym_cmds # Commands used to build a loadable module if different from building # a shared archive. module_cmds=$lt_module_cmds module_expsym_cmds=$lt_module_expsym_cmds # Whether we are building with GNU ld or not. with_gnu_ld=$lt_with_gnu_ld # Flag that allows shared libraries with undefined symbols to be built. 
allow_undefined_flag=$lt_allow_undefined_flag # Flag that enforces no undefined symbols. no_undefined_flag=$lt_no_undefined_flag # Flag to hardcode \$libdir into a binary during linking. # This must work even if \$libdir does not exist hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec # Whether we need a single "-rpath" flag with a separated argument. hardcode_libdir_separator=$lt_hardcode_libdir_separator # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary. hardcode_direct=$hardcode_direct # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary and the resulting library dependency is # "absolute",i.e impossible to change by setting \${shlibpath_var} if the # library is relocated. hardcode_direct_absolute=$hardcode_direct_absolute # Set to "yes" if using the -LDIR flag during linking hardcodes DIR # into the resulting binary. hardcode_minus_L=$hardcode_minus_L # Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR # into the resulting binary. hardcode_shlibpath_var=$hardcode_shlibpath_var # Set to "yes" if building a shared library automatically hardcodes DIR # into the library and all subsequent libraries and executables linked # against it. hardcode_automatic=$hardcode_automatic # Set to yes if linker adds runtime paths of dependent libraries # to runtime path list. inherit_rpath=$inherit_rpath # Whether libtool must link a program against all its dependency libraries. link_all_deplibs=$link_all_deplibs # Set to "yes" if exported symbols are required. always_export_symbols=$always_export_symbols # The commands to list exported symbols. export_symbols_cmds=$lt_export_symbols_cmds # Symbols that should not be listed in the preloaded symbols. exclude_expsyms=$lt_exclude_expsyms # Symbols that must always be exported. include_expsyms=$lt_include_expsyms # Commands necessary for linking programs (against libraries) with templates. 
prelink_cmds=$lt_prelink_cmds # Commands necessary for finishing linking programs. postlink_cmds=$lt_postlink_cmds # Specify filename containing input files. file_list_spec=$lt_file_list_spec # How to hardcode a shared library path into an executable. hardcode_action=$hardcode_action # The directories searched by this compiler when creating a shared library. compiler_lib_search_dirs=$lt_compiler_lib_search_dirs # Dependencies to place before and after the objects being linked to # create a shared library. predep_objects=$lt_predep_objects postdep_objects=$lt_postdep_objects predeps=$lt_predeps postdeps=$lt_postdeps # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path # ### END LIBTOOL CONFIG _LT_EOF case $host_os in aix3*) cat <<\_LT_EOF >> "$cfgfile" # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi _LT_EOF ;; esac ltmain="$ac_aux_dir/ltmain.sh" # We use sed instead of cat because bash on DJGPP gets confused if # if finds mixed CR/LF and LF-only lines. Since sed operates in # text mode, it properly converts lines to CR/LF. This bash problem # is reportedly fixed, but why not run on old versions too? sed '$q' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) if test x"$xsi_shell" = xyes; then sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ func_dirname ()\ {\ \ case ${1} in\ \ */*) func_dirname_result="${1%/*}${2}" ;;\ \ * ) func_dirname_result="${3}" ;;\ \ esac\ } # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? 
|| _lt_function_replace_fail=: sed -e '/^func_basename ()$/,/^} # func_basename /c\ func_basename ()\ {\ \ func_basename_result="${1##*/}"\ } # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ func_dirname_and_basename ()\ {\ \ case ${1} in\ \ */*) func_dirname_result="${1%/*}${2}" ;;\ \ * ) func_dirname_result="${3}" ;;\ \ esac\ \ func_basename_result="${1##*/}"\ } # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ func_stripname ()\ {\ \ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ \ # positional parameters, so assign one to ordinary parameter first.\ \ func_stripname_result=${3}\ \ func_stripname_result=${func_stripname_result#"${1}"}\ \ func_stripname_result=${func_stripname_result%"${2}"}\ } # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ func_split_long_opt ()\ {\ \ func_split_long_opt_name=${1%%=*}\ \ func_split_long_opt_arg=${1#*=}\ } # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? 
|| _lt_function_replace_fail=: sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ func_split_short_opt ()\ {\ \ func_split_short_opt_arg=${1#??}\ \ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ } # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ func_lo2o ()\ {\ \ case ${1} in\ \ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ \ *) func_lo2o_result=${1} ;;\ \ esac\ } # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_xform ()$/,/^} # func_xform /c\ func_xform ()\ {\ func_xform_result=${1%.*}.lo\ } # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_arith ()$/,/^} # func_arith /c\ func_arith ()\ {\ func_arith_result=$(( $* ))\ } # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_len ()$/,/^} # func_len /c\ func_len ()\ {\ func_len_result=${#1}\ } # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? 
|| _lt_function_replace_fail=: fi if test x"$lt_shell_append" = xyes; then sed -e '/^func_append ()$/,/^} # func_append /c\ func_append ()\ {\ eval "${1}+=\\${2}"\ } # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ func_append_quoted ()\ {\ \ func_quote_for_eval "${2}"\ \ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ } # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: # Save a `func_append' function call where possible by direct use of '+=' sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: else # Save a `func_append' function call even when '+=' is not available sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: fi if test x"$_lt_function_replace_fail" = x":"; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 $as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} fi mv -f "$cfgfile" "$ofile" || (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") chmod +x "$ofile" cat <<_LT_EOF >> "$ofile" # ### BEGIN LIBTOOL TAG CONFIG: CXX # The linker used to build libraries. LD=$lt_LD_CXX # How to create reloadable object files. 
reload_flag=$lt_reload_flag_CXX reload_cmds=$lt_reload_cmds_CXX # Commands used to build an old-style archive. old_archive_cmds=$lt_old_archive_cmds_CXX # A language specific compiler. CC=$lt_compiler_CXX # Is the compiler the GNU compiler? with_gcc=$GCC_CXX # Compiler flag to turn off builtin functions. no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX # Additional compiler flags for building library objects. pic_flag=$lt_lt_prog_compiler_pic_CXX # How to pass a linker flag through the compiler. wl=$lt_lt_prog_compiler_wl_CXX # Compiler flag to prevent dynamic linking. link_static_flag=$lt_lt_prog_compiler_static_CXX # Does compiler simultaneously support -c and -o options? compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX # Whether or not to add -lc for building shared libraries. build_libtool_need_lc=$archive_cmds_need_lc_CXX # Whether or not to disallow shared libs when runtime libs are static. allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX # Compiler flag to allow reflexive dlopens. export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX # Compiler flag to generate shared objects directly from archives. whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX # Whether the compiler copes with passing no objects directly. compiler_needs_object=$lt_compiler_needs_object_CXX # Create an old-style archive from a shared archive. old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX # Create a temporary old-style archive to link instead of a shared archive. old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX # Commands used to build a shared archive. archive_cmds=$lt_archive_cmds_CXX archive_expsym_cmds=$lt_archive_expsym_cmds_CXX # Commands used to build a loadable module if different from building # a shared archive. module_cmds=$lt_module_cmds_CXX module_expsym_cmds=$lt_module_expsym_cmds_CXX # Whether we are building with GNU ld or not. 
with_gnu_ld=$lt_with_gnu_ld_CXX # Flag that allows shared libraries with undefined symbols to be built. allow_undefined_flag=$lt_allow_undefined_flag_CXX # Flag that enforces no undefined symbols. no_undefined_flag=$lt_no_undefined_flag_CXX # Flag to hardcode \$libdir into a binary during linking. # This must work even if \$libdir does not exist hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX # Whether we need a single "-rpath" flag with a separated argument. hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary. hardcode_direct=$hardcode_direct_CXX # Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes # DIR into the resulting binary and the resulting library dependency is # "absolute",i.e impossible to change by setting \${shlibpath_var} if the # library is relocated. hardcode_direct_absolute=$hardcode_direct_absolute_CXX # Set to "yes" if using the -LDIR flag during linking hardcodes DIR # into the resulting binary. hardcode_minus_L=$hardcode_minus_L_CXX # Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR # into the resulting binary. hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX # Set to "yes" if building a shared library automatically hardcodes DIR # into the library and all subsequent libraries and executables linked # against it. hardcode_automatic=$hardcode_automatic_CXX # Set to yes if linker adds runtime paths of dependent libraries # to runtime path list. inherit_rpath=$inherit_rpath_CXX # Whether libtool must link a program against all its dependency libraries. link_all_deplibs=$link_all_deplibs_CXX # Set to "yes" if exported symbols are required. always_export_symbols=$always_export_symbols_CXX # The commands to list exported symbols. export_symbols_cmds=$lt_export_symbols_cmds_CXX # Symbols that should not be listed in the preloaded symbols. 
exclude_expsyms=$lt_exclude_expsyms_CXX # Symbols that must always be exported. include_expsyms=$lt_include_expsyms_CXX # Commands necessary for linking programs (against libraries) with templates. prelink_cmds=$lt_prelink_cmds_CXX # Commands necessary for finishing linking programs. postlink_cmds=$lt_postlink_cmds_CXX # Specify filename containing input files. file_list_spec=$lt_file_list_spec_CXX # How to hardcode a shared library path into an executable. hardcode_action=$hardcode_action_CXX # The directories searched by this compiler when creating a shared library. compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX # Dependencies to place before and after the objects being linked to # create a shared library. predep_objects=$lt_predep_objects_CXX postdep_objects=$lt_postdep_objects_CXX predeps=$lt_predeps_CXX postdeps=$lt_postdeps_CXX # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path_CXX # ### END LIBTOOL TAG CONFIG: CXX _LT_EOF ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. 
if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi libstoragemgmt-1.2.3/packaging/0000775000175000017500000000000012542455463013476 500000000000000libstoragemgmt-1.2.3/packaging/daemon/0000775000175000017500000000000012542455463014741 500000000000000libstoragemgmt-1.2.3/packaging/daemon/libstoragemgmtd0000775000175000017500000000343212537546123017773 00000000000000#!/bin/bash # # chkconfig: 2345 99 1 ### BEGIN INIT INFO # Provides: libstoragemgmtd # Required-Start: $remote_fs $local_fs # Required-Stop: $remote_fs $local_fs # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: libStorageMgmt plug-in daemon # Description: libStorageMgmt plug-in daemon ### END INIT INFO . /etc/init.d/functions NAME=lsmd PROG=/usr/bin/$NAME PID=/var/run/lsm/lsmd.pid LOCK=/var/lock/subsys/libstoragemgmtd RETVAL=0 STATUS=0 if [ "`id -u`" != 0 ] ; then echo "Not root" exit 4 fi check_dirs() { test -d /var/run/lsm || mkdir -p /var/run/lsm/ipc test -d /var/run/lsm/ipc || mkdir -p /var/run/lsm/ipc } status_lsm() { status -p $PID $NAME > /dev/null 2>&1 STATUS=$? } result() { if [ $RETVAL -eq 0 ]; then success else failure fi echo } int_start() { $PROG RETVAL=$? [ $RETVAL -eq 0 ] && touch $LOCK && pidof $NAME > $PID } # See how we were called. 
case "$1" in start) status_lsm check_dirs echo -n "Starting $NAME daemon: " if [ $STATUS != 0 ] ; then int_start else RETVAL=$STATUS fi result ;; force-stop|stop) echo -n $"Stopping $NAME daemon: " status_lsm if [ $STATUS == 0 ] ; then killproc -p $PID $NAME -TERM RETVAL=$? if [ $RETVAL -eq 0 ]; then rm -f $LOCK rm -f $PID fi else RETVAL=0 fi result ;; force-reload | reload) status_lsm check_dirs echo -n $"Reloading $NAME daemon: " if [ $STATUS == 0 ] ; then killproc -p $PID $NAME -HUP RETVAL=$? else int_start fi result ;; restart) status_lsm check_dirs echo -n $"Restarting $NAME daemon: " if [ $STATUS == 0 ] ; then killproc -p $PID $NAME -HUP else int_start fi result ;; status) status -p $PID $NAME RETVAL=$? ;; *) echo $"Usage: $0 {start|stop|restart|reload|status|force-stop}" ;; esac exit $RETVAL libstoragemgmt-1.2.3/packaging/daemon/lsm-tmpfiles.conf0000664000175000017500000000013012542230046020122 00000000000000D /var/run/lsm 0775 root libstoragemgmt - D /var/run/lsm/ipc 0775 root libstoragemgmt - libstoragemgmt-1.2.3/packaging/daemon/libstoragemgmt.service0000664000175000017500000000033212537546123021257 00000000000000[Unit] Description=libstoragemgmt plug-in server daemon After=syslog.target [Service] ExecStart=/usr/bin/lsmd -d ExecReload=/bin/kill -HUP $MAINPID StandardError=syslog User=root [Install] WantedBy=multi-user.target libstoragemgmt-1.2.3/packaging/daemon/Makefile.am0000664000175000017500000000041012537546123016706 00000000000000 if HAVE_SYSTEMD systemdsystemunit_DATA = libstoragemgmt.service if WITH_REST_API systemdsystemunit_DATA += libstoragemgmt-rest.service endif endif EXTRA_DIST = libstoragemgmt.service lsm-tmpfiles.conf libstoragemgmtd EXTRA_DIST += libstoragemgmt-rest.service libstoragemgmt-1.2.3/packaging/daemon/libstoragemgmt-rest.service0000664000175000017500000000040612537546123022234 00000000000000[Unit] Description=libstoragemgmt plug-in server daemon Requires=libstoragemgmt.service After=syslog.target [Service] 
ExecStart=/usr/bin/lsm_restd ExecReload=/bin/kill -HUP $MAINPID StandardError=syslog User=libstoragemgmt [Install] WantedBy=multi-user.target libstoragemgmt-1.2.3/packaging/daemon/Makefile.in0000664000175000017500000003673612542455445016745 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = 
(target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ @HAVE_SYSTEMD_TRUE@@WITH_REST_API_TRUE@am__append_1 = libstoragemgmt-rest.service subdir = packaging/daemon DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; am__vpath_adj = case $$p in \ $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ *) f=$$p;; \ esac; am__strip_dir = f=`echo $$p | 
sed -e 's|^.*/||'`; am__install_max = 40 am__nobase_strip_setup = \ srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` am__nobase_strip = \ for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" am__nobase_list = $(am__nobase_strip_setup); \ for p in $$list; do echo "$$p $$p"; done | \ sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ if (++n[$$2] == $(am__install_max)) \ { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ END { for (dir in files) print dir, files[dir] }' am__base_list = \ sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' am__uninstall_files_from_dir = { \ test -z "$$files" \ || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ $(am__cd) "$$dir" && rm -f $$files; }; \ } am__installdirs = "$(DESTDIR)$(systemdsystemunitdir)" DATA = $(systemdsystemunit_DATA) am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = 
@LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = 
@bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ @HAVE_SYSTEMD_TRUE@systemdsystemunit_DATA = libstoragemgmt.service \ @HAVE_SYSTEMD_TRUE@ $(am__append_1) EXTRA_DIST = libstoragemgmt.service lsm-tmpfiles.conf libstoragemgmtd \ libstoragemgmt-rest.service all: all-am .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu packaging/daemon/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu packaging/daemon/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs install-systemdsystemunitDATA: $(systemdsystemunit_DATA) @$(NORMAL_INSTALL) @list='$(systemdsystemunit_DATA)'; test -n "$(systemdsystemunitdir)" || list=; \ if test -n "$$list"; then \ echo " $(MKDIR_P) '$(DESTDIR)$(systemdsystemunitdir)'"; \ $(MKDIR_P) "$(DESTDIR)$(systemdsystemunitdir)" || exit 1; \ fi; \ for p in $$list; do \ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ echo "$$d$$p"; \ done | $(am__base_list) | \ while read files; do \ echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(systemdsystemunitdir)'"; \ $(INSTALL_DATA) $$files "$(DESTDIR)$(systemdsystemunitdir)" || exit $$?; \ done uninstall-systemdsystemunitDATA: @$(NORMAL_UNINSTALL) @list='$(systemdsystemunit_DATA)'; test -n "$(systemdsystemunitdir)" || list=; \ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ dir='$(DESTDIR)$(systemdsystemunitdir)'; $(am__uninstall_files_from_dir) tags TAGS: ctags CTAGS: cscope cscopelist: distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ 
case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done check-am: all-am check: check-am all-am: Makefile $(DATA) installdirs: for dir in "$(DESTDIR)$(systemdsystemunitdir)"; do \ test -z "$$dir" || $(MKDIR_P) "$$dir"; \ done install: install-am install-exec: install-exec-am install-data: install-data-am uninstall: uninstall-am install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-am install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." 
clean: clean-am clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-am -rm -f Makefile distclean-am: clean-am distclean-generic dvi: dvi-am dvi-am: html: html-am html-am: info: info-am info-am: install-data-am: install-systemdsystemunitDATA install-dvi: install-dvi-am install-dvi-am: install-exec-am: install-html: install-html-am install-html-am: install-info: install-info-am install-info-am: install-man: install-pdf: install-pdf-am install-pdf-am: install-ps: install-ps-am install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-am -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-am mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-am pdf-am: ps: ps-am ps-am: uninstall-am: uninstall-systemdsystemunitDATA .MAKE: install-am install-strip .PHONY: all all-am check check-am clean clean-generic clean-libtool \ cscopelist-am ctags-am distclean distclean-generic \ distclean-libtool distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip install-systemdsystemunitDATA installcheck \ installcheck-am installdirs maintainer-clean \ maintainer-clean-generic mostlyclean mostlyclean-generic \ mostlyclean-libtool pdf pdf-am ps ps-am tags-am uninstall \ uninstall-am uninstall-systemdsystemunitDATA # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. 
.NOEXPORT: libstoragemgmt-1.2.3/packaging/libstoragemgmt.spec0000664000175000017500000010123212542455451017306 00000000000000%bcond_with rest_api %bcond_without megaraid %bcond_without hpsa %bcond_without test # Use one-line macro for OBS workaround: # https://bugzilla.novell.com/show_bug.cgi?id=864323 %{?_with_rest_api: %global with_rest_api 1 } %{?_without_rest_api: %global with_rest_api 0 } %{?_with_megaraid: %global with_megaraid 1 } %{?_without_megaraid: %global with_megaraid 0 } %{?_with_hpsa: %global with_hpsa 1 } %{?_without_hpsa: %global with_hpsa 0 } %{?_with_test: %global with_test 1 } %{?_without_test: %global with_test 0 } %define libsoname libstoragemgmt %if 0%{?suse_version} || 0%{?fedora} >= 15 || 0%{?rhel} >= 7 %define with_systemd 1 %endif %global libstoragemgmt libstoragemgmt %if 0%{?suse_version} %global libstoragemgmt libstoragemgmt1 %endif %define udev_dir /lib # Later versions moved /lib to /usr/lib %if 0%{?fedora} >= 18 || 0%{?rhel} >= 7 || 0%{?suse_version} %define udev_dir /usr/lib %endif %if 0%{?suse_version} # Use fdupes on openSuSE. # For Fedora, it will conflict with brp-python-bytecompile # For RHEL, fdupes is in EPEL repo. 
%define do_fdupes 1 %endif Name: libstoragemgmt Version: 1.2.3 Release: 1%{?dist} Summary: Storage array management library Group: System Environment/Libraries %if 0%{?suse_version} License: LGPL-2.1+ %else License: LGPLv2+ %endif URL: https://github.com/libstorage/libstoragemgmt/ Source0: https://github.com/libstorage/libstoragemgmt/releases/download/%{version}/libstoragemgmt-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) Requires: %{libstoragemgmt}-python BuildRequires: autoconf automake libtool libxml2-devel check-devel perl BuildRequires: openssl-devel BuildRequires: python-argparse BuildRequires: glib2-devel # Explicitly require gcc-c++ is for OBS BuildRequires: gcc-c++ BuildRequires: libconfig-devel %if 0%{?suse_version} BuildRequires: libyajl-devel %else # Fedora RHEL BuildRequires: yajl-devel %endif %if 0%{?do_fdupes} BuildRequires: fdupes %endif %if 0%{?rhel} == 6 BuildRequires: python-ordereddict %else # Require bash-completion > 2.0 BuildRequires: bash-completion >= 2.0 %endif %if 0%{?with_systemd} BuildRequires: systemd Requires(post): systemd Requires(preun): systemd Requires(postun): systemd %endif %description The libStorageMgmt library will provide a vendor agnostic open source storage application programming interface (API) that will allow management of storage arrays. The library includes a command line interface for interactive use and scripting (command lsmcli). The library also has a daemon that is used for executing plug-ins in a separate process (lsmd). %if %{libstoragemgmt} != %{name} %package -n %{libstoragemgmt} Summary: Storage array management library Group: System Environment/Libraries Requires: %{libstoragemgmt}-python %description -n %{libstoragemgmt} The libStorageMgmt library will provide a vendor agnostic open source storage application programming interface (API) that will allow management of storage arrays. 
The library includes a command line interface for interactive use and scripting (command lsmcli). The library also has a daemon that is used for executing plug-ins in a separate process (lsmd). %endif %package devel Summary: Development files for %{name} Group: Development/Libraries Requires: %{name}%{?_isa} = %{version}-%{release} %description devel The %{name}-devel package contains libraries and header files for developing applications that use %{name}. %package -n %{libstoragemgmt}-python Summary: Python client libraries and plug-in support for %{libstoragemgmt} Group: System Environment/Libraries Requires: %{libstoragemgmt} = %{version}-%{release} BuildArch: noarch Requires: python-argparse %if 0%{?rhel} == 6 # No way to detect 6.2 yet. Just forcing all RHEL 6 to install # python-ordereddict just in case. Requires: python-ordereddict %endif %description -n %{libstoragemgmt}-python The %{libstoragemgmt}-python package contains python client libraries as well as python framework support and open source plug-ins written in python. %package -n %{libstoragemgmt}-smis-plugin Summary: Files for SMI-S generic array support for %{libstoragemgmt} Group: System Environment/Libraries %if 0%{?suse_version} BuildRequires: python-pywbem Requires: python-pywbem %else BuildRequires: pywbem Requires: pywbem %endif Requires: %{libstoragemgmt}-python = %{version}-%{release} BuildArch: noarch %description -n %{libstoragemgmt}-smis-plugin The %{libstoragemgmt}-smis-plugin package contains plug-in for generic SMI-S array support. 
%package -n %{libstoragemgmt}-netapp-plugin Summary: Files for NetApp array support for %{libstoragemgmt} Group: System Environment/Libraries %if 0%{?suse_version} BuildRequires: python-M2Crypto Requires: python-M2Crypto %else BuildRequires: m2crypto Requires: m2crypto %endif Requires: %{libstoragemgmt}-python = %{version}-%{release} BuildArch: noarch %description -n %{libstoragemgmt}-netapp-plugin The %{libstoragemgmt}-netapp-plugin package contains plug-in for NetApp array support. %package -n %{libstoragemgmt}-targetd-plugin Summary: Files for targetd array support for %{libstoragemgmt} Group: System Environment/Libraries Requires: %{libstoragemgmt}-python = %{version}-%{release} BuildArch: noarch %description -n %{libstoragemgmt}-targetd-plugin The %{libstoragemgmt}-targetd-plugin package contains plug-in for targetd array support. %package -n %{libstoragemgmt}-nstor-plugin Summary: Files for NexentaStor array support for %{libstoragemgmt} Group: System Environment/Libraries Requires: %{libstoragemgmt}-python = %{version}-%{release} BuildArch: noarch %description -n %{libstoragemgmt}-nstor-plugin The %{libstoragemgmt}-nstor-plugin package contains plug-in for NexentaStor array support. %package udev Summary: Udev files for %{name} Group: System Environment/Base %description udev The %{name}-udev package contains udev rules and helper utilities for uevents generated by the kernel. %if 0%{?with_rest_api} %package -n %{libstoragemgmt}-rest Summary: REST API daemon for %{libstoragemgmt} Group: System Environment/Daemons Requires: %{libstoragemgmt}%{?_isa} = %{version}-%{release} BuildRequires: libmicrohttpd-devel %if 0%{?suse_version} BuildRequires: libjson-devel procps Requires: libjson0 Requires: libmicrohttpd10 %else # Fedora RHEL BuildRequires: json-c-devel Requires: json-c Requires: libmicrohttpd %endif %description -n %{libstoragemgmt}-rest the %{libstoragemgmt}-rest package contains the http daemon for %{libstoragemgmt} rest api. 
%endif %if 0%{?with_megaraid} %package -n %{libstoragemgmt}-megaraid-plugin Summary: Files for LSI MegaRAID support for %{libstoragemgmt} Group: System Environment/Libraries Requires: %{libstoragemgmt}%{?_isa} = %{version}-%{release} BuildArch: noarch %description -n %{libstoragemgmt}-megaraid-plugin The %{libstoragemgmt}-megaraid-plugin package contains the plugin for LSI MegaRAID storage management via storcli. %endif %if 0%{?with_hpsa} %package -n %{libstoragemgmt}-hpsa-plugin Summary: Files for HP SmartArray support for %{libstoragemgmt} Group: System Environment/Libraries Requires: %{libstoragemgmt}%{?_isa} = %{version}-%{release} BuildArch: noarch %description -n %{libstoragemgmt}-hpsa-plugin The %{libstoragemgmt}-hpsa-plugin package contains the plugin for HP SmartArray storage management via hpssacli. %endif %prep %setup -q %build ./autogen.sh %configure \ %if 0%{?with_rest_api} != 1 --without-rest-api \ %endif %if 0%{?with_megaraid} != 1 --without-megaraid \ %endif %if 0%{?with_hpsa} != 1 --without-hpsa \ %endif --disable-static V=1 make %{?_smp_mflags} %install rm -rf %{buildroot} make install DESTDIR=%{buildroot} find %{buildroot} -name '*.la' -exec rm -f {} ';' %if 0%{?with_systemd} install -d -m755 %{buildroot}/%{_unitdir} install -m644 packaging/daemon/libstoragemgmt.service \ %{buildroot}/%{_unitdir}/libstoragemgmt.service #tempfiles.d configuration for /var/run mkdir -p %{buildroot}/%{_tmpfilesdir} install -m 0644 packaging/daemon/lsm-tmpfiles.conf \ %{buildroot}/%{_tmpfilesdir}/%{name}.conf %else #Need these to exist at install so we can start the daemon mkdir -p %{buildroot}/etc/rc.d/init.d install packaging/daemon/libstoragemgmtd \ %{buildroot}/etc/rc.d/init.d/libstoragemgmtd %endif #Files for udev handling mkdir -p %{buildroot}/%{udev_dir}/udev/rules.d install -m 644 tools/udev/90-scsi-ua.rules \ %{buildroot}/%{udev_dir}/udev/rules.d/90-scsi-ua.rules install -m 755 tools/udev/scan-scsi-target \ %{buildroot}/%{udev_dir}/udev/scan-scsi-target %if 
0%{?with_rest_api} %if 0%{?with_systemd} %{__install} -m 0644 packaging/daemon/libstoragemgmt-rest.service \ %{buildroot}/%{_unitdir}/libstoragemgmt-rest.service %endif %endif # Deduplication %if 0%{?do_fdupes} %fdupes -s %{buildroot}/%{python_sitelib}/lsm %endif %clean rm -rf %{buildroot} %if 0%{?with_test} %check if ! make check then cat test/test-suite.log || true exit 1 fi %endif %pre -n %{libstoragemgmt} if [ $1 -eq 1 ]; then #New install. getent group libstoragemgmt >/dev/null || groupadd -r libstoragemgmt getent passwd libstoragemgmt >/dev/null || \ useradd -r -g libstoragemgmt -d %{_localstatedir}/run/lsm \ -s /sbin/nologin \ -c "daemon account for libstoragemgmt" libstoragemgmt #Need these to exist at install so we can start the daemon mkdir -p %{_localstatedir}/run/lsm/ipc chmod 0755 %{_localstatedir}/run/lsm chmod 0755 %{_localstatedir}/run/lsm/ipc chown libstoragemgmt:libstoragemgmt %{_localstatedir}/run/lsm chown libstoragemgmt:libstoragemgmt %{_localstatedir}/run/lsm/ipc fi %post -n %{libstoragemgmt} /sbin/ldconfig %if 0%{?with_systemd} %if 0%{?suse_version} %service_add_post libstoragemgmt.service %else %systemd_post libstoragemgmt.service %endif %else /sbin/chkconfig --add libstoragemgmtd %endif %preun -n %{libstoragemgmt} %if 0%{?with_systemd} %if 0%{?suse_version} %service_del_preun libstoragemgmt.service %else %systemd_preun libstoragemgmt.service %endif %else /etc/rc.d/init.d/libstoragemgmtd stop > /dev/null 2>&1 || : /sbin/chkconfig --del libstoragemgmtd %endif %postun -n %{libstoragemgmt} /sbin/ldconfig %if 0%{?with_systemd} %if 0%{?suse_version} %service_del_postun libstoragemgmt.service %else %systemd_postun libstoragemgmt.service %endif %else #Restart the daemond /etc/rc.d/init.d/libstoragemgmtd restart >/dev/null 2>&1 || : %endif %if 0%{?with_rest_api} %post -n %{libstoragemgmt}-rest %if 0%{?with_systemd} %if 0%{?suse_version} %service_add_post libstoragemgmt-rest.service %else %systemd_post libstoragemgmt-rest.service %endif %endif %preun 
-n %{libstoragemgmt}-rest %if 0%{?with_systemd} %if 0%{?suse_version} %service_del_preun libstoragemgmt-rest.service %else %systemd_preun libstoragemgmt-rest.service %endif %endif %postun -n %{libstoragemgmt}-rest %if 0%{?with_systemd} %if 0%{?suse_version} %service_del_postun libstoragemgmt-rest.service %else %systemd_postun libstoragemgmt-rest.service %endif %endif %endif # Need to restart lsmd if plugin is new installed or removed. %post -n %{libstoragemgmt}-smis-plugin if [ $1 -eq 1 ]; then # New install. /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %postun -n %{libstoragemgmt}-smis-plugin if [ $1 -eq 0 ]; then # Remove /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi # Need to restart lsmd if plugin is new installed or removed. %post -n %{libstoragemgmt}-netapp-plugin if [ $1 -eq 1 ]; then # New install. /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %postun -n %{libstoragemgmt}-netapp-plugin if [ $1 -eq 0 ]; then # Remove /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi # Need to restart lsmd if plugin is new installed or removed. %post -n %{libstoragemgmt}-targetd-plugin if [ $1 -eq 1 ]; then # New install. /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %postun -n %{libstoragemgmt}-targetd-plugin if [ $1 -eq 0 ]; then # Remove /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi # Need to restart lsmd if plugin is new installed or removed. %post -n %{libstoragemgmt}-nstor-plugin if [ $1 -eq 1 ]; then # New install. /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %postun -n %{libstoragemgmt}-nstor-plugin if [ $1 -eq 0 ]; then # Remove /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %if 0%{?with_megaraid} # Need to restart lsmd if plugin is new installed or removed. 
%post -n %{libstoragemgmt}-megaraid-plugin if [ $1 -eq 1 ]; then # New install. /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %postun -n %{libstoragemgmt}-megaraid-plugin if [ $1 -eq 0 ]; then # Remove /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %endif %if 0%{?with_hpsa} # Need to restart lsmd if plugin is new installed or removed. %post -n %{libstoragemgmt}-hpsa-plugin if [ $1 -eq 1 ]; then # New install. /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %postun -n %{libstoragemgmt}-hpsa-plugin if [ $1 -eq 0 ]; then # Remove /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %endif %files -n %{libstoragemgmt} %defattr(-,root,root,-) %doc README COPYING.LIB %{_mandir}/man1/lsmcli.1* %{_mandir}/man1/lsmd.1* %{_mandir}/man5/lsmd.conf.5* %{_libdir}/*.so.* %{_bindir}/lsmcli %{_bindir}/lsmd %{_bindir}/simc_lsmplugin %{_sysconfdir}/lsm/lsmd.conf %dir %{_sysconfdir}/lsm %dir %{_sysconfdir}/lsm/pluginconf.d %{_mandir}/man1/simc_lsmplugin.1* %if 0%{?rhel} == 6 %{_sysconfdir}/bash_completion.d/lsmcli %else %{_datadir}/bash-completion/completions/lsmcli %endif %if 0%{?with_systemd} %{_unitdir}/libstoragemgmt.service %endif %if 0%{?with_systemd} %attr(0644, root, root) %{_tmpfilesdir}/%{name}.conf %else %attr(0755, root, root) /etc/rc.d/init.d/libstoragemgmtd %endif %files devel %defattr(-,root,root,-) %{_includedir}/* %{_libdir}/*.so %{_libdir}/pkgconfig/libstoragemgmt.pc %files -n %{libstoragemgmt}-python %defattr(-,root,root,-) #Python library files %dir %{python_sitelib}/lsm %{python_sitelib}/lsm/__init__.* %dir %{python_sitelib}/lsm/external %{python_sitelib}/lsm/external/* %{python_sitelib}/lsm/_client.* %{python_sitelib}/lsm/_common.* %{python_sitelib}/lsm/_data.* %{python_sitelib}/lsm/_iplugin.* %{python_sitelib}/lsm/_pluginrunner.* %{python_sitelib}/lsm/_transport.* %{python_sitelib}/lsm/version.* %dir %{python_sitelib}/lsm/plugin 
%{python_sitelib}/lsm/plugin/__init__.* %dir %{python_sitelib}/lsm/plugin/sim %{python_sitelib}/lsm/plugin/sim/__init__.* %{python_sitelib}/lsm/plugin/sim/simulator.* %{python_sitelib}/lsm/plugin/sim/simarray.* %dir %{python_sitelib}/lsm/lsmcli %{python_sitelib}/lsm/lsmcli/__init__.* %{python_sitelib}/lsm/lsmcli/data_display.* %{python_sitelib}/lsm/lsmcli/cmdline.* %{_bindir}/sim_lsmplugin %{_sysconfdir}/lsm/pluginconf.d/sim.conf %{_mandir}/man1/sim_lsmplugin.1* %files -n %{libstoragemgmt}-smis-plugin %defattr(-,root,root,-) %dir %{python_sitelib}/lsm/plugin/smispy %{python_sitelib}/lsm/plugin/smispy/__init__.* %{python_sitelib}/lsm/plugin/smispy/smis.* %{python_sitelib}/lsm/plugin/smispy/dmtf.* %{python_sitelib}/lsm/plugin/smispy/utils.* %{python_sitelib}/lsm/plugin/smispy/smis_common.* %{python_sitelib}/lsm/plugin/smispy/smis_cap.* %{python_sitelib}/lsm/plugin/smispy/smis_sys.* %{python_sitelib}/lsm/plugin/smispy/smis_pool.* %{python_sitelib}/lsm/plugin/smispy/smis_disk.* %{python_sitelib}/lsm/plugin/smispy/smis_vol.* %{python_sitelib}/lsm/plugin/smispy/smis_ag.* %{_bindir}/smispy_lsmplugin %{_mandir}/man1/smispy_lsmplugin.1* %files -n %{libstoragemgmt}-netapp-plugin %defattr(-,root,root,-) %dir %{python_sitelib}/lsm/plugin/ontap %{python_sitelib}/lsm/plugin/ontap/__init__.* %{python_sitelib}/lsm/plugin/ontap/na.* %{python_sitelib}/lsm/plugin/ontap/ontap.* %{_bindir}/ontap_lsmplugin %{_mandir}/man1/ontap_lsmplugin.1* %files -n %{libstoragemgmt}-targetd-plugin %defattr(-,root,root,-) %dir %{python_sitelib}/lsm/plugin/targetd %{python_sitelib}/lsm/plugin/targetd/__init__.* %{python_sitelib}/lsm/plugin/targetd/targetd.* %{_bindir}/targetd_lsmplugin %{_mandir}/man1/targetd_lsmplugin.1* %files -n %{libstoragemgmt}-nstor-plugin %defattr(-,root,root,-) %dir %{python_sitelib}/lsm/plugin/nstor %{python_sitelib}/lsm/plugin/nstor/__init__.* %{python_sitelib}/lsm/plugin/nstor/nstor.* %{_bindir}/nstor_lsmplugin %{_mandir}/man1/nstor_lsmplugin.1* %if 0%{?with_megaraid} %files 
-n %{libstoragemgmt}-megaraid-plugin %defattr(-,root,root,-) %dir %{python_sitelib}/lsm/plugin/megaraid %{python_sitelib}/lsm/plugin/megaraid/__init__.* %{python_sitelib}/lsm/plugin/megaraid/megaraid.* %{python_sitelib}/lsm/plugin/megaraid/utils.* %{_bindir}/megaraid_lsmplugin %{_sysconfdir}/lsm/pluginconf.d/megaraid.conf %{_mandir}/man1/megaraid_lsmplugin.1* %endif %if 0%{?with_hpsa} %files -n %{libstoragemgmt}-hpsa-plugin %defattr(-,root,root,-) %dir %{python_sitelib}/lsm/plugin/hpsa %{python_sitelib}/lsm/plugin/hpsa/__init__.* %{python_sitelib}/lsm/plugin/hpsa/hpsa.* %{python_sitelib}/lsm/plugin/hpsa/utils.* %{_bindir}/hpsa_lsmplugin %{_sysconfdir}/lsm/pluginconf.d/hpsa.conf %{_mandir}/man1/hpsa_lsmplugin.1* %endif %files udev %defattr(-,root,root,-) %{udev_dir}/udev/scan-scsi-target %{udev_dir}/udev/rules.d/90-scsi-ua.rules %if 0%{?with_rest_api} %files -n %{libstoragemgmt}-rest %defattr(-,root,root,-) %{_bindir}/lsm_restd %if 0%{?with_systemd} %{_unitdir}/libstoragemgmt-rest.service %endif %endif %changelog * Wed Jun 24 2015 Gris Ge 1.2.3-1 - Bug fixes: * lsmcli bash completion: Fix syntax error. * lsmcli bash completion: Fix volume-delete. * lsmcli bash completion: Add missing completions. * Tue Jun 23 2015 Gris Ge 1.2.2-1 - Bug fixes: * Fix: selinux dac_override * Manpage: Update hpsa and megaraid plugin manpages. * HP Smart Array Plugin: Fix pool querying on P410i. * MegaRAID Plugin: Fix bug when no volume configured. * Wed Jun 17 2015 Gris Ge 1.2.1-1 - Bug fix: * Fix 'make distcheck' error on bash-completion. * Tue Jun 16 2015 Gris Ge 1.2.0-1 - New features: * The lsmd daemon now allows plugin to run as root using configure file. * Targetd plugin got full access group support. * The simulator plugin switched from risky pickle to sqlite3 for state saving. * Introduced bash auto completion support for lsmcli command. 
* Two new plugins for hardware RAID cards: * LSI MegaRAID plugin -- megaraid:// Dell PERC and other OEM rebanded MegaRAID cards are also supported. * HP SmartArray plugin -- hpsa:// - Library adds: * New method to query RAID information of volume: Python: lsm.Client.volume_raid_info(). C: lsm_volume_raid_info(). * New method to query pool membership: Python: lsm.Client.pool_member_info(). C: lsm_pool_member_info(). * New disk status constant to indicate certain disk could be used as pool disk or dedicate spare disk. Python: DISK.STATUS_FREE. C: LSM_DISK_STATUS_FREE. * New method to create RAID volume on hardware RAID cards: Python: lsm.Client.volume_raid_create_cap_get(). lsm.Client.volume_raid_create(). C: lsm_volume_raid_create_cap_get(). lsm_volume_raid_create(). * New C plugin register interface for version 1.2 new methods: lsm_register_plugin_v1_2() - Bug fixes and miscellaneous fixes: * lsmd: Fix a possible infinity loop on plugin search. * Fix memory leak in C unit test. * Library: Fix incorrect Volume.vpd83 definition * SMI-S plugin: Fix SMI-S plugin spare disk support. * SMI-S plugin: Fix target_ports() for HP 3PAR and EMC VMAX. * SMI-S plugin: Fix the incorrect profile_check(). * Fix C library compile warning. * Fix C library potential memory leak in handle_volume_raid_create(). * ONTAP, MegaRAID, SMI-S Plugins: Enforce the definition of Pool.status. * Change license statement by replacing address to URL. * lsmd: add error catch for signal. * lsmcli: fix _get_item error message. * C Library: Fix message loop * C Library: Clean up code for DRY(don't repeat yourself). * SMI-S Plugin: Collect xml during exception. * C Library: Remove ambiguity between wrong type or OOM * C code clean up to use K&R code style. * Add Charles Rose to AUTHORS. 
* Thu Dec 4 2014 Tony Asleson 1.1.0-1 - Library adds: API Constants for new pool element types and plugin changes to support it * C constants: LSM_POOL_ELEMENT_TYPE_VOLUME_FULL, LSM_POOL_ELEMENT_TYPE_VOLUME_THIN * Py constants: Pool.ELEMENT_TYPE_VOLUME_FULL, Poll.ELEMENT_TYPE_THIN lsmcli: * lt - Alias for 'list --type target_ports' * Removed --init for volume-mask, it was broken for targetd (the only user) and instead of fixing we are going to improve targetd to support access groups in the next release - Numerous code improvements, including a big SMI-S plugin refactor, source code documentation corrections - Bug fix: Use correct default values for anonymous uid/gid in lsmcli - Bug fix: simc simulator not working for allowable NULL parameters for: * fs_child_dependency * fs_child_dependency_rm * fs_snapshot_restore - Bug fix: lsm_restd memory leak corrections - Bug fix: NetApp plugin, correctly set export path when caller specifies default in API - Bug fix: Add file locking to sim plugin to prevent concurrent modification - Bug fix: Consistently report common error conditions for NO_STATE_CHANGE, EXISTS_INITIATOR for all plugins - Bug fix: Number of bugs addressed in SMI-S plugin including: * EMC: Correct error path when replicating a volume with a duplicate volume name * HDS: Correctly create thinly provisioned volume on thinly provisioned pool * Sun Sep 7 2014 Tony Asleson 1.0.0-1 - Release version 1 - Numerous constants re-naming & removing - Removed the pool create/delete until things work better, esp. 
WRT SMI-S - Added checks for initiator ID verification - Added checks for vpd 0x83 verification - Simplified error logging (removed domain & level) - Re-named functions for online,offline -> enable,disable - Always use objects instead of object ID in function params - Removed individual files from fs snapshot creation - Add unsupported actions for pools - lsm_capability_set_n uses a -1 to terminate list - Volume status removed, replaced with admin state - Removed ibmiv7k plugin - Explicitly specify python2 - Error path consistency changes (same error for same condition across plug-ins) - Numerous bug fixes * Thu Jul 3 2014 Tony Asleson 0.1.0-1 - Release candidate for a 1.0.0 release - Optional data removed - Initiator only functions removed - Pool create from from volumes removed - Code directory structure updated - Target port listing added * Tue Feb 18 2014 Gris Ge 0.0.24-2 - Introduce a REST daemon(only for systemd yet). - Allowing enable/disable ibm_v7k plugin via 'rpmbuild --with ibm_v7k' or 'rpmbuild --without ibm_v7k'. - Fix the compile warning about data of changelog by changing 'Thu Nov 27 2013' to 'Wed Nov 27 2013'. 
* Thu Jan 30 2014 Tony Asleson 0.0.24-1 - Command line interface (CLI) re-factored and improved to be easier to use and more consistent, man pages have been updated - Command line output now has '-s, --script' for an additional way to output information for consumption in scripts - Command line option '-o' for retrieving optional/extended data for disks & pools - Pool creation/deleting in CLI & python API - Numerous small bug fixes - C API, added ability to list disks, list plugins and retrieve optional data for disks - SSL for SMI-S is more stringent on certificate checking for newer distributions, new URI option "no_ssl_verify=yes" to disable * Wed Nov 27 2013 Tony Asleson 0.0.23-1 - Addition of listing disks implemented for SMI-S and Ontap plugins (new, not in C library yet) - Add the ability to list currently installed and usable plug-ins - Verify return types are correct in python client calls - Added the ability to retrieve optional data (new, not in C library yet) - Visibility reductions for python code (somethings were public when should be private - Add calls to create/delete pools (new, not in C library yet) - Add missing initiator type for SAS - Improved vpd83 retrieval for SMI-S - Performance improvements for SMI-S plug-in - Numerous small bug fixes - Nstor plugin, additional testing and bug fixes - lsmd, added call to setgroups and enable full relo and PIE (ASLR) for security improvements - simulator state is now versioned - SCSI Unit Attention uevent handling * Mon Aug 12 2013 Tony Asleson 0.0.22-1 - Numerous code improvments/fixes - BZ 968384 - BZ 990577 * Tue Jul 16 2013 Tony Asleson 0.0.21-1 - Don't include IBM7K plugin for RHEL > 6 missing paramakio - IEC binary size handling - Functionality improvements for IBM V7K array - Workaround for python bug on F19 - Bugfix (BZ 968384) - Package plug-ins as separately in rpm packages * Fri May 24 2013 Tony Asleson 0.0.20-1 - Python library files now in separate rpm - Additional debug for plug-ins when 
exceptions occur - iSCSI CHAP support modified to handle both inbound and outbound authentication - VOLUME_THIN Added as new capability flag - IBM V7000 storage array support - NFS export support for targetd - EXPORT_CUSTOM_PATH added capability flag * Sat Apr 20 2013 Tony Asleson 0.0.19-1 - Improved E-Series array support - Ontap plug-in: improve performance with many Volumes - lsmcli: Number of corrections on handling unit specifiers - lsmcli: Correct stack track when stdout is written to while closed - Fix build to work with automake >= 1.12 * Thu Mar 7 2013 Tony Asleson 0.0.18-1 - lsmd: Re-written in C - Simplify fs_delete - Corrections for C client against Python plugin - Testing: Run cross language unit test too - Initial FS support for targetd plugin - Fix multi-arch python issues which prevent py and compiled py files from being identical on different arches * Thu Jan 31 2013 Tony Asleson 0.0.17-1 - Inconsistency corrections between C and Python API - Source code documentation updates - NexentaStor plug-in has been added * Wed Jan 2 2013 Tony Asleson 0.0.16-1 - lsmcli: Add confirmation prompt for data loss operations - lsmcli: Display enumerated values as text - lsmcli: Exit with 7 for --job-status when not complete - Fixed URI example to reference an existing plug-in - lsmcli: Retrieve plug-in desc. 
and version (lsmcli --plugin-info) - simc: Implement CHAP auth function (no-op) - lsmcli: Change check for determining if lsmd is running - Disable mirroring for SMI-S as it needs some re-work * Mon Nov 19 2012 Tony Asleson 0.0.15-1 - Pool parameter is optional when replicating a volume - Code improvements(Memory leak fix, lsmcli checks if lsmd is running) - Source code documentation updates - Ability to override simulator data storage location - make check target added to run unit tests * Fri Oct 19 2012 Tony Asleson 0.0.14-1 - test/cmdline.py added to automatically test what an array supports - Bug fixes (local plug-in execution, smi-s delete clone, code warnings) - targetd: (uri syntax consistency change, initialization code change) - Pool id added to volume information - lsmcli: Added --replicate-volume-range-block-size to retrieve replicated block size * Fri Sep 28 2012 Tony Asleson (Red Hat) 0.0.13-1 - targetD Feature adds/fixes for initiators, init_granted_to_volume, volumes_accessible_by_init, initiator_grant, initiator_revoke - SMI-S added compatibility with CIM_StorageConfigurationService - SMI-S bug fixes/changes to support XIV arrays (Basic functionality verified) - SMI-S Proxy layer added to allow different internal implementations of smi-s client - Added missing version information for C plug-in API - lsmcli URI can be stored in file .lsmcli in users home directory * Fri Sep 07 2012 Tony Asleson (Red Hat) 0.0.12-1 - SMI-S plug-in enhancements (Detach before delete, bug fixes for eSeries) - Added version specifier for non-opaque structs in plug-in callback interface - Documentation updates (doxygen, man pages) - Ontap plug-in: support timeout values - lsmcli, return back async. values other than volumes when using --job-status * Mon Aug 13 2012 Tony Asleson 0.0.11-1 - SMI-S fixes and improvements (WaitForCopyState, _get_class_instance) - Methods for arrays that don't support access groups to grant access for luns to initiators etc. 
- ISCSI Chap authentication - System level status field for overall array status - targetd updates for mapping targets to initiators - Simulator updates (python & C) - Removed tog-pegasus dependency (SMI-S is python plug-in) - Removed lsmVolumeStatus as it was implemented and redundant - initscript, check for /var/run and create if missing * Fri Jul 20 2012 Tony Asleson 0.0.10-1 - Numerous updates and re-name for plug-in targetd_lsmplugin - targetd_lsmplugin included in release - Memory leak fixes and improved unit tests - Initial capability query support, implemented for all plug-ins - Flags variable added to API calls, (Warning: C API/ABI breakage, python unaffected) - Bug fixes for NetApp ontap plug-in - SMI-S bug fixes (initiator listing and replication, mode and sync types) - Added ability to specify mirroring async or sync for replication - Added version header file to allow client version header checks - Simulator plug-in written in C, simc_lsmplugin is available * Tue Jun 12 2012 Tony Asleson 0.0.9-1 - Initial checkin of lio plug-in - System filtering via URI (smispy) - Error code mapping (ontap) - Fixed build so same build tarball is used for all binaries * Mon Jun 4 2012 Tony Asleson 0.0.8-1 - Make building of SMI-S CPP plugin optional - Add pkg-config file - SMIS: Fix exception while retrieving Volumes - SMIS: Fix exception while retrieving Volumes - lsm: Add package imports - Make Smis class available in lsm python package - Add option to disable building C unit test - Make simulator classes available in lsm python package - Make ontap class available in lsm python package - Changes to support building on Fedora 17 (v2) - Spec. file updates from feedback from T. 
Callaway (spot) - F17 linker symbol visibility correction - Remove unneeded build dependencies and cleaned up some warnings - C Updates, client C library feature parity with python * Fri May 11 2012 Tony Asleson 0.0.7-1 - Bug fix for smi-s constants - Display formatting improvements - Added header option for lsmcli - Improved version handling for builds - Made terminology consistent - Ability to list visibility for access groups and volumes - Simulator plug-in fully supports all block operations - Added support for multiple systems with a single plug-in instance * Fri Apr 20 2012 Tony Asleson 0.0.6-1 - Documentation improvements (man & source code) - Support for access groups - Unified spec files Fedora/RHEL - Package version auto generate - Rpm target added to make - Bug fix for missing optional property on volume retrieval (smispy plug-in) * Fri Apr 6 2012 Tony Asleson 0.0.5-1 - Spec file clean-up improvements - Async. operation added to lsmcli and ability to check on job status - Sub volume replication support - Ability to check for child dependencies on VOLUMES, FS and files - SMI-S Bug fixes and improvements * Mon Mar 26 2012 Tony Asleson 0.0.4-1 - Restore from snapshot - Job identifiers string instead of integer - Updated license address * Wed Mar 14 2012 Tony Asleson 0.0.3-1 - Changes to installer, daemon uid, gid, /var/run/lsm/* - NFS improvements and bug fixes - Python library clean up (rpmlint errors) * Sun Mar 11 2012 Tony Asleson 0.0.2-1 - Added NetApp native plugin * Mon Feb 6 2012 Tony Asleson 0.0.1alpha-1 - Initial version of package libstoragemgmt-1.2.3/packaging/Makefile.am0000664000175000017500000000020512537546123015445 00000000000000## Process this file with automake to produce Makefile.in EXTRA_DIST = libstoragemgmt.spec.in libstoragemgmt.spec SUBDIRS = daemon libstoragemgmt-1.2.3/packaging/Makefile.in0000664000175000017500000004645012542455445015474 00000000000000# Makefile.in generated by automake 1.13.4 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. @SET_MAKE@ VPATH = @srcdir@ am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ *) echo "am__make_running_with_option: internal error: invalid" \ "target option '$${target_option-}' specified" >&2; \ exit 1;; \ esac; \ has_opt=no; \ sane_makeflags=$$MAKEFLAGS; \ if $(am__is_gnu_make); then \ sane_makeflags=$$MFLAGS; \ else \ case $$MAKEFLAGS in \ *\\[\ \ ]*) \ bs=\\; \ sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ esac; \ fi; \ skip_next=no; \ strip_trailopt () \ { \ flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ }; \ for flg in $$sane_makeflags; do \ test $$skip_next = yes && { skip_next=no; continue; }; \ case $$flg in \ *=*|--*) continue;; \ -*I) strip_trailopt 'I'; skip_next=yes;; \ -*I?*) strip_trailopt 'I';; \ -*O) strip_trailopt 'O'; skip_next=yes;; \ -*O?*) strip_trailopt 'O';; \ -*l) strip_trailopt 'l'; skip_next=yes;; \ -*l?*) strip_trailopt 'l';; \ -[dEDm]) skip_next=yes;; \ -[JT]) skip_next=yes;; \ esac; \ case $$flg in \ *$$target_option*) has_opt=yes; break;; \ esac; \ done; \ test $$has_opt = yes am__make_dryrun = (target_option=n; $(am__make_running_with_option)) am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) pkgdatadir = $(datadir)/@PACKAGE@ pkgincludedir = $(includedir)/@PACKAGE@ pkglibdir = $(libdir)/@PACKAGE@ pkglibexecdir = $(libexecdir)/@PACKAGE@ am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd 
install_sh_DATA = $(install_sh) -c -m 644 install_sh_PROGRAM = $(install_sh) -c install_sh_SCRIPT = $(install_sh) -c INSTALL_HEADER = $(INSTALL_DATA) transform = $(program_transform_name) NORMAL_INSTALL = : PRE_INSTALL = : POST_INSTALL = : NORMAL_UNINSTALL = : PRE_UNINSTALL = : POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ subdir = packaging DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ $(srcdir)/libstoragemgmt.spec.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/ax_python_module.m4 \ $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h CONFIG_CLEAN_FILES = libstoragemgmt.spec CONFIG_CLEAN_VPATH_FILES = AM_V_P = $(am__v_P_@AM_V@) am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) am__v_P_0 = false am__v_P_1 = : AM_V_GEN = $(am__v_GEN_@AM_V@) am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) am__v_GEN_0 = @echo " GEN " $@; am__v_GEN_1 = AM_V_at = $(am__v_at_@AM_V@) am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) am__v_at_0 = @ am__v_at_1 = SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ ctags-recursive dvi-recursive html-recursive info-recursive \ install-data-recursive install-dvi-recursive \ install-exec-recursive install-html-recursive \ install-info-recursive install-pdf-recursive \ install-ps-recursive install-recursive installcheck-recursive \ installdirs-recursive pdf-recursive ps-recursive \ tags-recursive uninstall-recursive am__can_run_installinfo = \ case $$AM_UPDATE_INFO_DIR in \ n|no|NO) false;; \ *) (install-info --version) >/dev/null 2>&1;; \ esac RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ distclean-recursive maintainer-clean-recursive am__recursive_targets = \ 
$(RECURSIVE_TARGETS) \ $(RECURSIVE_CLEAN_TARGETS) \ $(am__extra_recursive_targets) AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ distdir am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) # Read a list of newline-separated strings from the standard input, # and print each of them once, without duplicates. Input order is # *not* preserved. am__uniquify_input = $(AWK) '\ BEGIN { nonempty = 0; } \ { items[$$0] = 1; nonempty = 1; } \ END { if (nonempty) { for (i in items) print i; }; } \ ' # Make sure the list of sources is unique. This is necessary because, # e.g., the same source file might be shared among _SOURCES variables # for different programs/libraries. am__define_uniq_tagged_files = \ list='$(am__tagged_files)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | $(am__uniquify_input)` ETAGS = etags CTAGS = ctags DIST_SUBDIRS = $(SUBDIRS) DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) am__relativize = \ dir0=`pwd`; \ sed_first='s,^\([^/]*\)/.*$$,\1,'; \ sed_rest='s,^[^/]*/*,,'; \ sed_last='s,^.*/\([^/]*\)$$,\1,'; \ sed_butlast='s,/*[^/]*$$,,'; \ while test -n "$$dir1"; do \ first=`echo "$$dir1" | sed -e "$$sed_first"`; \ if test "$$first" != "."; then \ if test "$$first" = ".."; then \ dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ else \ first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ if test "$$first2" = "$$first"; then \ dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ else \ dir2="../$$dir2"; \ fi; \ dir0="$$dir0"/"$$first"; \ fi; \ fi; \ dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ done; \ reldir="$$dir2" ACLOCAL = @ACLOCAL@ AMTAR = @AMTAR@ AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ AR = @AR@ AUTOCONF = @AUTOCONF@ AUTOHEADER = @AUTOHEADER@ AUTOMAKE = @AUTOMAKE@ AWK = @AWK@ CC = @CC@ CCDEPMODE = @CCDEPMODE@ CFLAGS = @CFLAGS@ CPP = @CPP@ CPPFLAGS = @CPPFLAGS@ CXX = @CXX@ CXXCPP = @CXXCPP@ 
CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ DLLTOOL = @DLLTOOL@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ ECHO_T = @ECHO_T@ EGREP = @EGREP@ EXEEXT = @EXEEXT@ FGREP = @FGREP@ GREP = @GREP@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ JSON_CFLAGS = @JSON_CFLAGS@ JSON_LIBS = @JSON_LIBS@ LD = @LD@ LDFLAGS = @LDFLAGS@ LIBCHECK_CFLAGS = @LIBCHECK_CFLAGS@ LIBCHECK_LIBS = @LIBCHECK_LIBS@ LIBCONFIG_CFLAGS = @LIBCONFIG_CFLAGS@ LIBCONFIG_LIBS = @LIBCONFIG_LIBS@ LIBGLIB_CFLAGS = @LIBGLIB_CFLAGS@ LIBGLIB_LIBS = @LIBGLIB_LIBS@ LIBMICROHTTPD_CFLAGS = @LIBMICROHTTPD_CFLAGS@ LIBMICROHTTPD_LIBS = @LIBMICROHTTPD_LIBS@ LIBOBJS = @LIBOBJS@ LIBS = @LIBS@ LIBSM_LIBTOOL_VERSION = @LIBSM_LIBTOOL_VERSION@ LIBSM_MAJOR_VERSION = @LIBSM_MAJOR_VERSION@ LIBSM_MICRO_VERSION = @LIBSM_MICRO_VERSION@ LIBSM_MINOR_VERSION = @LIBSM_MINOR_VERSION@ LIBSM_VERSION = @LIBSM_VERSION@ LIBSM_VERSION_INFO = @LIBSM_VERSION_INFO@ LIBSM_VERSION_NUMBER = @LIBSM_VERSION_NUMBER@ LIBTOOL = @LIBTOOL@ LIBXML_CFLAGS = @LIBXML_CFLAGS@ LIBXML_LIBS = @LIBXML_LIBS@ LIPO = @LIPO@ LN_S = @LN_S@ LTLIBOBJS = @LTLIBOBJS@ MAINT = @MAINT@ MAKEINFO = @MAKEINFO@ MANIFEST_TOOL = @MANIFEST_TOOL@ MKDIR_P = @MKDIR_P@ NM = @NM@ NMEDIT = @NMEDIT@ OBJDUMP = @OBJDUMP@ OBJEXT = @OBJEXT@ OTOOL = @OTOOL@ OTOOL64 = @OTOOL64@ PACKAGE = @PACKAGE@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_URL = @PACKAGE_URL@ PACKAGE_VERSION = @PACKAGE_VERSION@ PATH_SEPARATOR = @PATH_SEPARATOR@ PERL = @PERL@ PKG_CONFIG = @PKG_CONFIG@ PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@ PKG_CONFIG_PATH = @PKG_CONFIG_PATH@ PYTHON = @PYTHON@ PYTHON_EXEC_PREFIX = @PYTHON_EXEC_PREFIX@ PYTHON_PLATFORM = @PYTHON_PLATFORM@ PYTHON_PREFIX = @PYTHON_PREFIX@ 
PYTHON_VERSION = @PYTHON_VERSION@ RANLIB = @RANLIB@ SED = @SED@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ VERSION = @VERSION@ YAJL_LIBS = @YAJL_LIBS@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ ac_ct_AR = @ac_ct_AR@ ac_ct_CC = @ac_ct_CC@ ac_ct_CXX = @ac_ct_CXX@ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ am__include = @am__include@ am__leading_dot = @am__leading_dot@ am__quote = @am__quote@ am__tar = @am__tar@ am__untar = @am__untar@ bashcompletiondir = @bashcompletiondir@ bindir = @bindir@ build = @build@ build_alias = @build_alias@ build_cpu = @build_cpu@ build_os = @build_os@ build_vendor = @build_vendor@ builddir = @builddir@ datadir = @datadir@ datarootdir = @datarootdir@ docdir = @docdir@ dvidir = @dvidir@ exec_prefix = @exec_prefix@ host = @host@ host_alias = @host_alias@ host_cpu = @host_cpu@ host_os = @host_os@ host_vendor = @host_vendor@ htmldir = @htmldir@ includedir = @includedir@ infodir = @infodir@ install_sh = @install_sh@ libdir = @libdir@ libexecdir = @libexecdir@ localedir = @localedir@ localstatedir = @localstatedir@ mandir = @mandir@ mkdir_p = @mkdir_p@ oldincludedir = @oldincludedir@ pdfdir = @pdfdir@ pkgpyexecdir = @pkgpyexecdir@ pkgpythondir = @pkgpythondir@ prefix = @prefix@ program_transform_name = @program_transform_name@ psdir = @psdir@ pyexecdir = @pyexecdir@ pythondir = @pythondir@ sbindir = @sbindir@ sharedstatedir = @sharedstatedir@ srcdir = @srcdir@ sysconfdir = @sysconfdir@ systemdsystemunitdir = @systemdsystemunitdir@ target_alias = @target_alias@ top_build_prefix = @top_build_prefix@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ EXTRA_DIST = libstoragemgmt.spec.in libstoragemgmt.spec SUBDIRS = daemon all: all-recursive .SUFFIXES: $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) @for dep in $?; do \ case '$(am__configure_deps)' in \ *$$dep*) \ ( cd $(top_builddir) && $(MAKE) 
$(AM_MAKEFLAGS) am--refresh ) \ && { if test -f $@; then exit 0; else break; fi; }; \ exit 1;; \ esac; \ done; \ echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu packaging/Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --gnu packaging/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ *config.status*) \ cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ *) \ echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ esac; $(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh $(am__aclocal_m4_deps): libstoragemgmt.spec: $(top_builddir)/config.status $(srcdir)/libstoragemgmt.spec.in cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ mostlyclean-libtool: -rm -f *.lo clean-libtool: -rm -rf .libs _libs # This directory's subdirectories are mostly independent; you can cd # into them and run 'make' without going through this Makefile. # To change the values of 'make' variables: instead of editing Makefiles, # (1) if the variable is set in 'config.status', edit 'config.status' # (which will cause the Makefiles to be regenerated when you run 'make'); # (2) otherwise, pass the desired values on the 'make' command line. 
$(am__recursive_targets): @fail=; \ if $(am__make_keepgoing); then \ failcom='fail=yes'; \ else \ failcom='exit 1'; \ fi; \ dot_seen=no; \ target=`echo $@ | sed s/-recursive//`; \ case "$@" in \ distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ *) list='$(SUBDIRS)' ;; \ esac; \ for subdir in $$list; do \ echo "Making $$target in $$subdir"; \ if test "$$subdir" = "."; then \ dot_seen=yes; \ local_target="$$target-am"; \ else \ local_target="$$target"; \ fi; \ ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ || eval $$failcom; \ done; \ if test "$$dot_seen" = "no"; then \ $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ fi; test -z "$$fail" ID: $(am__tagged_files) $(am__define_uniq_tagged_files); mkid -fID $$unique tags: tags-recursive TAGS: tags tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) set x; \ here=`pwd`; \ if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ include_option=--etags-include; \ empty_fix=.; \ else \ include_option=--include; \ empty_fix=; \ fi; \ list='$(SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ test ! 
-f $$subdir/TAGS || \ set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ fi; \ done; \ $(am__define_uniq_tagged_files); \ shift; \ if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ if test $$# -gt 0; then \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ "$$@" $$unique; \ else \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ $$unique; \ fi; \ fi ctags: ctags-recursive CTAGS: ctags ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) $(am__define_uniq_tagged_files); \ test -z "$(CTAGS_ARGS)$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$unique GTAGS: here=`$(am__cd) $(top_builddir) && pwd` \ && $(am__cd) $(top_srcdir) \ && gtags -i $(GTAGS_ARGS) "$$here" cscopelist: cscopelist-recursive cscopelist-am: $(am__tagged_files) list='$(am__tagged_files)'; \ case "$(srcdir)" in \ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ *) sdir=$(subdir)/$(srcdir) ;; \ esac; \ for i in $$list; do \ if test -f "$$i"; then \ echo "$(subdir)/$$i"; \ else \ echo "$$sdir/$$i"; \ fi; \ done >> $(top_builddir)/cscope.files distclean-tags: -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags distdir: $(DISTFILES) @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ list='$(DISTFILES)'; \ dist_files=`for file in $$list; do echo $$file; done | \ sed -e "s|^$$srcdirstrip/||;t" \ -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ case $$dist_files in \ */*) $(MKDIR_P) `echo "$$dist_files" | \ sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ sort -u` ;; \ esac; \ for file in $$dist_files; do \ if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ if test -d $$d/$$file; then \ dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ if test -d "$(distdir)/$$file"; then \ find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ fi; \ if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ fi; \ cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ else \ test -f "$(distdir)/$$file" \ || cp -p $$d/$$file "$(distdir)/$$file" \ || exit 1; \ fi; \ done @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ if test "$$subdir" = .; then :; else \ $(am__make_dryrun) \ || test -d "$(distdir)/$$subdir" \ || $(MKDIR_P) "$(distdir)/$$subdir" \ || exit 1; \ dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ $(am__relativize); \ new_distdir=$$reldir; \ dir1=$$subdir; dir2="$(top_distdir)"; \ $(am__relativize); \ new_top_distdir=$$reldir; \ echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ ($(am__cd) $$subdir && \ $(MAKE) $(AM_MAKEFLAGS) \ top_distdir="$$new_top_distdir" \ distdir="$$new_distdir" \ am__remove_distdir=: \ am__skip_length_check=: \ am__skip_mode_fix=: \ distdir) \ || exit 1; \ fi; \ done check-am: all-am check: check-recursive all-am: Makefile installdirs: installdirs-recursive installdirs-am: install: install-recursive install-exec: install-exec-recursive install-data: install-data-recursive uninstall: uninstall-recursive install-am: all-am @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am installcheck: installcheck-recursive install-strip: if test -z '$(STRIP)'; then \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ install; \ else \ $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ fi mostlyclean-generic: clean-generic: distclean-generic: -test -z 
"$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) maintainer-clean-generic: @echo "This command is intended for maintainers to use" @echo "it deletes files that may require special tools to rebuild." clean: clean-recursive clean-am: clean-generic clean-libtool mostlyclean-am distclean: distclean-recursive -rm -f Makefile distclean-am: clean-am distclean-generic distclean-tags dvi: dvi-recursive dvi-am: html: html-recursive html-am: info: info-recursive info-am: install-data-am: install-dvi: install-dvi-recursive install-dvi-am: install-exec-am: install-html: install-html-recursive install-html-am: install-info: install-info-recursive install-info-am: install-man: install-pdf: install-pdf-recursive install-pdf-am: install-ps: install-ps-recursive install-ps-am: installcheck-am: maintainer-clean: maintainer-clean-recursive -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic mostlyclean: mostlyclean-recursive mostlyclean-am: mostlyclean-generic mostlyclean-libtool pdf: pdf-recursive pdf-am: ps: ps-recursive ps-am: uninstall-am: .MAKE: $(am__recursive_targets) install-am install-strip .PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ check-am clean clean-generic clean-libtool cscopelist-am ctags \ ctags-am distclean distclean-generic distclean-libtool \ distclean-tags distdir dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ install-dvi-am install-exec install-exec-am install-html \ install-html-am install-info install-info-am install-man \ install-pdf install-pdf-am install-ps install-ps-am \ install-strip installcheck installcheck-am installdirs \ installdirs-am maintainer-clean maintainer-clean-generic \ mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ ps ps-am tags tags-am uninstall uninstall-am # Tell versions [3.59,3.63) of GNU make to not export all 
variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: libstoragemgmt-1.2.3/packaging/libstoragemgmt.spec.in0000664000175000017500000010123612542455432017716 00000000000000%bcond_with rest_api %bcond_without megaraid %bcond_without hpsa %bcond_without test # Use one-line macro for OBS workaround: # https://bugzilla.novell.com/show_bug.cgi?id=864323 %{?_with_rest_api: %global with_rest_api 1 } %{?_without_rest_api: %global with_rest_api 0 } %{?_with_megaraid: %global with_megaraid 1 } %{?_without_megaraid: %global with_megaraid 0 } %{?_with_hpsa: %global with_hpsa 1 } %{?_without_hpsa: %global with_hpsa 0 } %{?_with_test: %global with_test 1 } %{?_without_test: %global with_test 0 } %define libsoname libstoragemgmt %if 0%{?suse_version} || 0%{?fedora} >= 15 || 0%{?rhel} >= 7 %define with_systemd 1 %endif %global libstoragemgmt libstoragemgmt %if 0%{?suse_version} %global libstoragemgmt libstoragemgmt1 %endif %define udev_dir /lib # Later versions moved /lib to /usr/lib %if 0%{?fedora} >= 18 || 0%{?rhel} >= 7 || 0%{?suse_version} %define udev_dir /usr/lib %endif %if 0%{?suse_version} # Use fdupes on openSuSE. # For Fedora, it will conflict with brp-python-bytecompile # For RHEL, fdupes is in EPEL repo. 
%define do_fdupes 1 %endif Name: libstoragemgmt Version: @VERSION@ Release: 1%{?dist} Summary: Storage array management library Group: System Environment/Libraries %if 0%{?suse_version} License: LGPL-2.1+ %else License: LGPLv2+ %endif URL: https://github.com/libstorage/libstoragemgmt/ Source0: https://github.com/libstorage/libstoragemgmt/releases/download/%{version}/libstoragemgmt-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) Requires: %{libstoragemgmt}-python BuildRequires: autoconf automake libtool libxml2-devel check-devel perl BuildRequires: openssl-devel BuildRequires: python-argparse BuildRequires: glib2-devel # Explicitly require gcc-c++ is for OBS BuildRequires: gcc-c++ BuildRequires: libconfig-devel %if 0%{?suse_version} BuildRequires: libyajl-devel %else # Fedora RHEL BuildRequires: yajl-devel %endif %if 0%{?do_fdupes} BuildRequires: fdupes %endif %if 0%{?rhel} == 6 BuildRequires: python-ordereddict %else # Require bash-completion > 2.0 BuildRequires: bash-completion >= 2.0 %endif %if 0%{?with_systemd} BuildRequires: systemd Requires(post): systemd Requires(preun): systemd Requires(postun): systemd %endif %description The libStorageMgmt library will provide a vendor agnostic open source storage application programming interface (API) that will allow management of storage arrays. The library includes a command line interface for interactive use and scripting (command lsmcli). The library also has a daemon that is used for executing plug-ins in a separate process (lsmd). %if %{libstoragemgmt} != %{name} %package -n %{libstoragemgmt} Summary: Storage array management library Group: System Environment/Libraries Requires: %{libstoragemgmt}-python %description -n %{libstoragemgmt} The libStorageMgmt library will provide a vendor agnostic open source storage application programming interface (API) that will allow management of storage arrays. 
The library includes a command line interface for interactive use and scripting (command lsmcli). The library also has a daemon that is used for executing plug-ins in a separate process (lsmd). %endif %package devel Summary: Development files for %{name} Group: Development/Libraries Requires: %{name}%{?_isa} = %{version}-%{release} %description devel The %{name}-devel package contains libraries and header files for developing applications that use %{name}. %package -n %{libstoragemgmt}-python Summary: Python client libraries and plug-in support for %{libstoragemgmt} Group: System Environment/Libraries Requires: %{libstoragemgmt} = %{version}-%{release} BuildArch: noarch Requires: python-argparse %if 0%{?rhel} == 6 # No way to detect 6.2 yet. Just forcing all RHEL 6 to install # python-ordereddict just in case. Requires: python-ordereddict %endif %description -n %{libstoragemgmt}-python The %{libstoragemgmt}-python package contains python client libraries as well as python framework support and open source plug-ins written in python. %package -n %{libstoragemgmt}-smis-plugin Summary: Files for SMI-S generic array support for %{libstoragemgmt} Group: System Environment/Libraries %if 0%{?suse_version} BuildRequires: python-pywbem Requires: python-pywbem %else BuildRequires: pywbem Requires: pywbem %endif Requires: %{libstoragemgmt}-python = %{version}-%{release} BuildArch: noarch %description -n %{libstoragemgmt}-smis-plugin The %{libstoragemgmt}-smis-plugin package contains plug-in for generic SMI-S array support. 
%package -n %{libstoragemgmt}-netapp-plugin Summary: Files for NetApp array support for %{libstoragemgmt} Group: System Environment/Libraries %if 0%{?suse_version} BuildRequires: python-M2Crypto Requires: python-M2Crypto %else BuildRequires: m2crypto Requires: m2crypto %endif Requires: %{libstoragemgmt}-python = %{version}-%{release} BuildArch: noarch %description -n %{libstoragemgmt}-netapp-plugin The %{libstoragemgmt}-netapp-plugin package contains plug-in for NetApp array support. %package -n %{libstoragemgmt}-targetd-plugin Summary: Files for targetd array support for %{libstoragemgmt} Group: System Environment/Libraries Requires: %{libstoragemgmt}-python = %{version}-%{release} BuildArch: noarch %description -n %{libstoragemgmt}-targetd-plugin The %{libstoragemgmt}-targetd-plugin package contains plug-in for targetd array support. %package -n %{libstoragemgmt}-nstor-plugin Summary: Files for NexentaStor array support for %{libstoragemgmt} Group: System Environment/Libraries Requires: %{libstoragemgmt}-python = %{version}-%{release} BuildArch: noarch %description -n %{libstoragemgmt}-nstor-plugin The %{libstoragemgmt}-nstor-plugin package contains plug-in for NexentaStor array support. %package udev Summary: Udev files for %{name} Group: System Environment/Base %description udev The %{name}-udev package contains udev rules and helper utilities for uevents generated by the kernel. %if 0%{?with_rest_api} %package -n %{libstoragemgmt}-rest Summary: REST API daemon for %{libstoragemgmt} Group: System Environment/Daemons Requires: %{libstoragemgmt}%{?_isa} = %{version}-%{release} BuildRequires: libmicrohttpd-devel %if 0%{?suse_version} BuildRequires: libjson-devel procps Requires: libjson0 Requires: libmicrohttpd10 %else # Fedora RHEL BuildRequires: json-c-devel Requires: json-c Requires: libmicrohttpd %endif %description -n %{libstoragemgmt}-rest the %{libstoragemgmt}-rest package contains the http daemon for %{libstoragemgmt} rest api. 
%endif %if 0%{?with_megaraid} %package -n %{libstoragemgmt}-megaraid-plugin Summary: Files for LSI MegaRAID support for %{libstoragemgmt} Group: System Environment/Libraries Requires: %{libstoragemgmt}%{?_isa} = %{version}-%{release} BuildArch: noarch %description -n %{libstoragemgmt}-megaraid-plugin The %{libstoragemgmt}-megaraid-plugin package contains the plugin for LSI MegaRAID storage management via storcli. %endif %if 0%{?with_hpsa} %package -n %{libstoragemgmt}-hpsa-plugin Summary: Files for HP SmartArray support for %{libstoragemgmt} Group: System Environment/Libraries Requires: %{libstoragemgmt}%{?_isa} = %{version}-%{release} BuildArch: noarch %description -n %{libstoragemgmt}-hpsa-plugin The %{libstoragemgmt}-hpsa-plugin package contains the plugin for HP SmartArray storage management via hpssacli. %endif %prep %setup -q %build ./autogen.sh %configure \ %if 0%{?with_rest_api} != 1 --without-rest-api \ %endif %if 0%{?with_megaraid} != 1 --without-megaraid \ %endif %if 0%{?with_hpsa} != 1 --without-hpsa \ %endif --disable-static V=1 make %{?_smp_mflags} %install rm -rf %{buildroot} make install DESTDIR=%{buildroot} find %{buildroot} -name '*.la' -exec rm -f {} ';' %if 0%{?with_systemd} install -d -m755 %{buildroot}/%{_unitdir} install -m644 packaging/daemon/libstoragemgmt.service \ %{buildroot}/%{_unitdir}/libstoragemgmt.service #tempfiles.d configuration for /var/run mkdir -p %{buildroot}/%{_tmpfilesdir} install -m 0644 packaging/daemon/lsm-tmpfiles.conf \ %{buildroot}/%{_tmpfilesdir}/%{name}.conf %else #Need these to exist at install so we can start the daemon mkdir -p %{buildroot}/etc/rc.d/init.d install packaging/daemon/libstoragemgmtd \ %{buildroot}/etc/rc.d/init.d/libstoragemgmtd %endif #Files for udev handling mkdir -p %{buildroot}/%{udev_dir}/udev/rules.d install -m 644 tools/udev/90-scsi-ua.rules \ %{buildroot}/%{udev_dir}/udev/rules.d/90-scsi-ua.rules install -m 755 tools/udev/scan-scsi-target \ %{buildroot}/%{udev_dir}/udev/scan-scsi-target %if 
0%{?with_rest_api} %if 0%{?with_systemd} %{__install} -m 0644 packaging/daemon/libstoragemgmt-rest.service \ %{buildroot}/%{_unitdir}/libstoragemgmt-rest.service %endif %endif # Deduplication %if 0%{?do_fdupes} %fdupes -s %{buildroot}/%{python_sitelib}/lsm %endif %clean rm -rf %{buildroot} %if 0%{?with_test} %check if ! make check then cat test/test-suite.log || true exit 1 fi %endif %pre -n %{libstoragemgmt} if [ $1 -eq 1 ]; then #New install. getent group libstoragemgmt >/dev/null || groupadd -r libstoragemgmt getent passwd libstoragemgmt >/dev/null || \ useradd -r -g libstoragemgmt -d %{_localstatedir}/run/lsm \ -s /sbin/nologin \ -c "daemon account for libstoragemgmt" libstoragemgmt #Need these to exist at install so we can start the daemon mkdir -p %{_localstatedir}/run/lsm/ipc chmod 0755 %{_localstatedir}/run/lsm chmod 0755 %{_localstatedir}/run/lsm/ipc chown libstoragemgmt:libstoragemgmt %{_localstatedir}/run/lsm chown libstoragemgmt:libstoragemgmt %{_localstatedir}/run/lsm/ipc fi %post -n %{libstoragemgmt} /sbin/ldconfig %if 0%{?with_systemd} %if 0%{?suse_version} %service_add_post libstoragemgmt.service %else %systemd_post libstoragemgmt.service %endif %else /sbin/chkconfig --add libstoragemgmtd %endif %preun -n %{libstoragemgmt} %if 0%{?with_systemd} %if 0%{?suse_version} %service_del_preun libstoragemgmt.service %else %systemd_preun libstoragemgmt.service %endif %else /etc/rc.d/init.d/libstoragemgmtd stop > /dev/null 2>&1 || : /sbin/chkconfig --del libstoragemgmtd %endif %postun -n %{libstoragemgmt} /sbin/ldconfig %if 0%{?with_systemd} %if 0%{?suse_version} %service_del_postun libstoragemgmt.service %else %systemd_postun libstoragemgmt.service %endif %else #Restart the daemond /etc/rc.d/init.d/libstoragemgmtd restart >/dev/null 2>&1 || : %endif %if 0%{?with_rest_api} %post -n %{libstoragemgmt}-rest %if 0%{?with_systemd} %if 0%{?suse_version} %service_add_post libstoragemgmt-rest.service %else %systemd_post libstoragemgmt-rest.service %endif %endif %preun 
-n %{libstoragemgmt}-rest %if 0%{?with_systemd} %if 0%{?suse_version} %service_del_preun libstoragemgmt-rest.service %else %systemd_preun libstoragemgmt-rest.service %endif %endif %postun -n %{libstoragemgmt}-rest %if 0%{?with_systemd} %if 0%{?suse_version} %service_del_postun libstoragemgmt-rest.service %else %systemd_postun libstoragemgmt-rest.service %endif %endif %endif # Need to restart lsmd if plugin is new installed or removed. %post -n %{libstoragemgmt}-smis-plugin if [ $1 -eq 1 ]; then # New install. /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %postun -n %{libstoragemgmt}-smis-plugin if [ $1 -eq 0 ]; then # Remove /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi # Need to restart lsmd if plugin is new installed or removed. %post -n %{libstoragemgmt}-netapp-plugin if [ $1 -eq 1 ]; then # New install. /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %postun -n %{libstoragemgmt}-netapp-plugin if [ $1 -eq 0 ]; then # Remove /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi # Need to restart lsmd if plugin is new installed or removed. %post -n %{libstoragemgmt}-targetd-plugin if [ $1 -eq 1 ]; then # New install. /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %postun -n %{libstoragemgmt}-targetd-plugin if [ $1 -eq 0 ]; then # Remove /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi # Need to restart lsmd if plugin is new installed or removed. %post -n %{libstoragemgmt}-nstor-plugin if [ $1 -eq 1 ]; then # New install. /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %postun -n %{libstoragemgmt}-nstor-plugin if [ $1 -eq 0 ]; then # Remove /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %if 0%{?with_megaraid} # Need to restart lsmd if plugin is new installed or removed. 
%post -n %{libstoragemgmt}-megaraid-plugin if [ $1 -eq 1 ]; then # New install. /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %postun -n %{libstoragemgmt}-megaraid-plugin if [ $1 -eq 0 ]; then # Remove /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %endif %if 0%{?with_hpsa} # Need to restart lsmd if plugin is new installed or removed. %post -n %{libstoragemgmt}-hpsa-plugin if [ $1 -eq 1 ]; then # New install. /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %postun -n %{libstoragemgmt}-hpsa-plugin if [ $1 -eq 0 ]; then # Remove /usr/bin/systemctl try-restart libstoragemgmt.service \ >/dev/null 2>&1 || : fi %endif %files -n %{libstoragemgmt} %defattr(-,root,root,-) %doc README COPYING.LIB %{_mandir}/man1/lsmcli.1* %{_mandir}/man1/lsmd.1* %{_mandir}/man5/lsmd.conf.5* %{_libdir}/*.so.* %{_bindir}/lsmcli %{_bindir}/lsmd %{_bindir}/simc_lsmplugin %{_sysconfdir}/lsm/lsmd.conf %dir %{_sysconfdir}/lsm %dir %{_sysconfdir}/lsm/pluginconf.d %{_mandir}/man1/simc_lsmplugin.1* %if 0%{?rhel} == 6 %{_sysconfdir}/bash_completion.d/lsmcli %else %{_datadir}/bash-completion/completions/lsmcli %endif %if 0%{?with_systemd} %{_unitdir}/libstoragemgmt.service %endif %if 0%{?with_systemd} %attr(0644, root, root) %{_tmpfilesdir}/%{name}.conf %else %attr(0755, root, root) /etc/rc.d/init.d/libstoragemgmtd %endif %files devel %defattr(-,root,root,-) %{_includedir}/* %{_libdir}/*.so %{_libdir}/pkgconfig/libstoragemgmt.pc %files -n %{libstoragemgmt}-python %defattr(-,root,root,-) #Python library files %dir %{python_sitelib}/lsm %{python_sitelib}/lsm/__init__.* %dir %{python_sitelib}/lsm/external %{python_sitelib}/lsm/external/* %{python_sitelib}/lsm/_client.* %{python_sitelib}/lsm/_common.* %{python_sitelib}/lsm/_data.* %{python_sitelib}/lsm/_iplugin.* %{python_sitelib}/lsm/_pluginrunner.* %{python_sitelib}/lsm/_transport.* %{python_sitelib}/lsm/version.* %dir %{python_sitelib}/lsm/plugin 
%{python_sitelib}/lsm/plugin/__init__.* %dir %{python_sitelib}/lsm/plugin/sim %{python_sitelib}/lsm/plugin/sim/__init__.* %{python_sitelib}/lsm/plugin/sim/simulator.* %{python_sitelib}/lsm/plugin/sim/simarray.* %dir %{python_sitelib}/lsm/lsmcli %{python_sitelib}/lsm/lsmcli/__init__.* %{python_sitelib}/lsm/lsmcli/data_display.* %{python_sitelib}/lsm/lsmcli/cmdline.* %{_bindir}/sim_lsmplugin %{_sysconfdir}/lsm/pluginconf.d/sim.conf %{_mandir}/man1/sim_lsmplugin.1* %files -n %{libstoragemgmt}-smis-plugin %defattr(-,root,root,-) %dir %{python_sitelib}/lsm/plugin/smispy %{python_sitelib}/lsm/plugin/smispy/__init__.* %{python_sitelib}/lsm/plugin/smispy/smis.* %{python_sitelib}/lsm/plugin/smispy/dmtf.* %{python_sitelib}/lsm/plugin/smispy/utils.* %{python_sitelib}/lsm/plugin/smispy/smis_common.* %{python_sitelib}/lsm/plugin/smispy/smis_cap.* %{python_sitelib}/lsm/plugin/smispy/smis_sys.* %{python_sitelib}/lsm/plugin/smispy/smis_pool.* %{python_sitelib}/lsm/plugin/smispy/smis_disk.* %{python_sitelib}/lsm/plugin/smispy/smis_vol.* %{python_sitelib}/lsm/plugin/smispy/smis_ag.* %{_bindir}/smispy_lsmplugin %{_mandir}/man1/smispy_lsmplugin.1* %files -n %{libstoragemgmt}-netapp-plugin %defattr(-,root,root,-) %dir %{python_sitelib}/lsm/plugin/ontap %{python_sitelib}/lsm/plugin/ontap/__init__.* %{python_sitelib}/lsm/plugin/ontap/na.* %{python_sitelib}/lsm/plugin/ontap/ontap.* %{_bindir}/ontap_lsmplugin %{_mandir}/man1/ontap_lsmplugin.1* %files -n %{libstoragemgmt}-targetd-plugin %defattr(-,root,root,-) %dir %{python_sitelib}/lsm/plugin/targetd %{python_sitelib}/lsm/plugin/targetd/__init__.* %{python_sitelib}/lsm/plugin/targetd/targetd.* %{_bindir}/targetd_lsmplugin %{_mandir}/man1/targetd_lsmplugin.1* %files -n %{libstoragemgmt}-nstor-plugin %defattr(-,root,root,-) %dir %{python_sitelib}/lsm/plugin/nstor %{python_sitelib}/lsm/plugin/nstor/__init__.* %{python_sitelib}/lsm/plugin/nstor/nstor.* %{_bindir}/nstor_lsmplugin %{_mandir}/man1/nstor_lsmplugin.1* %if 0%{?with_megaraid} %files 
-n %{libstoragemgmt}-megaraid-plugin %defattr(-,root,root,-) %dir %{python_sitelib}/lsm/plugin/megaraid %{python_sitelib}/lsm/plugin/megaraid/__init__.* %{python_sitelib}/lsm/plugin/megaraid/megaraid.* %{python_sitelib}/lsm/plugin/megaraid/utils.* %{_bindir}/megaraid_lsmplugin %{_sysconfdir}/lsm/pluginconf.d/megaraid.conf %{_mandir}/man1/megaraid_lsmplugin.1* %endif %if 0%{?with_hpsa} %files -n %{libstoragemgmt}-hpsa-plugin %defattr(-,root,root,-) %dir %{python_sitelib}/lsm/plugin/hpsa %{python_sitelib}/lsm/plugin/hpsa/__init__.* %{python_sitelib}/lsm/plugin/hpsa/hpsa.* %{python_sitelib}/lsm/plugin/hpsa/utils.* %{_bindir}/hpsa_lsmplugin %{_sysconfdir}/lsm/pluginconf.d/hpsa.conf %{_mandir}/man1/hpsa_lsmplugin.1* %endif %files udev %defattr(-,root,root,-) %{udev_dir}/udev/scan-scsi-target %{udev_dir}/udev/rules.d/90-scsi-ua.rules %if 0%{?with_rest_api} %files -n %{libstoragemgmt}-rest %defattr(-,root,root,-) %{_bindir}/lsm_restd %if 0%{?with_systemd} %{_unitdir}/libstoragemgmt-rest.service %endif %endif %changelog * Wed Jun 24 2015 Gris Ge 1.2.3-1 - Bug fixes: * lsmcli bash completion: Fix syntax error. * lsmcli bash completion: Fix volume-delete. * lsmcli bash completion: Add missing completions. * Tue Jun 23 2015 Gris Ge 1.2.2-1 - Bug fixes: * Fix: selinux dac_override * Manpage: Update hpsa and megaraid plugin manpages. * HP Smart Array Plugin: Fix pool querying on P410i. * MegaRAID Plugin: Fix bug when no volume configured. * Wed Jun 17 2015 Gris Ge 1.2.1-1 - Bug fix: * Fix 'make distcheck' error on bash-completion. * Tue Jun 16 2015 Gris Ge 1.2.0-1 - New features: * The lsmd daemon now allows plugin to run as root using configure file. * Targetd plugin got full access group support. * The simulator plugin switched from risky pickle to sqlite3 for state saving. * Introduced bash auto completion support for lsmcli command. 
* Two new plugins for hardware RAID cards: * LSI MegaRAID plugin -- megaraid:// Dell PERC and other OEM rebanded MegaRAID cards are also supported. * HP SmartArray plugin -- hpsa:// - Library adds: * New method to query RAID information of volume: Python: lsm.Client.volume_raid_info(). C: lsm_volume_raid_info(). * New method to query pool membership: Python: lsm.Client.pool_member_info(). C: lsm_pool_member_info(). * New disk status constant to indicate certain disk could be used as pool disk or dedicate spare disk. Python: DISK.STATUS_FREE. C: LSM_DISK_STATUS_FREE. * New method to create RAID volume on hardware RAID cards: Python: lsm.Client.volume_raid_create_cap_get(). lsm.Client.volume_raid_create(). C: lsm_volume_raid_create_cap_get(). lsm_volume_raid_create(). * New C plugin register interface for version 1.2 new methods: lsm_register_plugin_v1_2() - Bug fixes and miscellaneous fixes: * lsmd: Fix a possible infinity loop on plugin search. * Fix memory leak in C unit test. * Library: Fix incorrect Volume.vpd83 definition * SMI-S plugin: Fix SMI-S plugin spare disk support. * SMI-S plugin: Fix target_ports() for HP 3PAR and EMC VMAX. * SMI-S plugin: Fix the incorrect profile_check(). * Fix C library compile warning. * Fix C library potential memory leak in handle_volume_raid_create(). * ONTAP, MegaRAID, SMI-S Plugins: Enforce the definition of Pool.status. * Change license statement by replacing address to URL. * lsmd: add error catch for signal. * lsmcli: fix _get_item error message. * C Library: Fix message loop * C Library: Clean up code for DRY(don't repeat yourself). * SMI-S Plugin: Collect xml during exception. * C Library: Remove ambiguity between wrong type or OOM * C code clean up to use K&R code style. * Add Charles Rose to AUTHORS. 
* Thu Dec 4 2014 Tony Asleson 1.1.0-1 - Library adds: API Constants for new pool element types and plugin changes to support it * C constants: LSM_POOL_ELEMENT_TYPE_VOLUME_FULL, LSM_POOL_ELEMENT_TYPE_VOLUME_THIN * Py constants: Pool.ELEMENT_TYPE_VOLUME_FULL, Poll.ELEMENT_TYPE_THIN lsmcli: * lt - Alias for 'list --type target_ports' * Removed --init for volume-mask, it was broken for targetd (the only user) and instead of fixing we are going to improve targetd to support access groups in the next release - Numerous code improvements, including a big SMI-S plugin refactor, source code documentation corrections - Bug fix: Use correct default values for anonymous uid/gid in lsmcli - Bug fix: simc simulator not working for allowable NULL parameters for: * fs_child_dependency * fs_child_dependency_rm * fs_snapshot_restore - Bug fix: lsm_restd memory leak corrections - Bug fix: NetApp plugin, correctly set export path when caller specifies default in API - Bug fix: Add file locking to sim plugin to prevent concurrent modification - Bug fix: Consistently report common error conditions for NO_STATE_CHANGE, EXISTS_INITIATOR for all plugins - Bug fix: Number of bugs addressed in SMI-S plugin including: * EMC: Correct error path when replicating a volume with a duplicate volume name * HDS: Correctly create thinly provisioned volume on thinly provisioned pool * Sun Sep 7 2014 Tony Asleson 1.0.0-1 - Release version 1 - Numerous constants re-naming & removing - Removed the pool create/delete until things work better, esp. 
WRT SMI-S - Added checks for initiator ID verification - Added checks for vpd 0x83 verification - Simplified error logging (removed domain & level) - Re-named functions for online,offline -> enable,disable - Always use objects instead of object ID in function params - Removed individual files from fs snapshot creation - Add unsupported actions for pools - lsm_capability_set_n uses a -1 to terminate list - Volume status removed, replaced with admin state - Removed ibmiv7k plugin - Explicitly specify python2 - Error path consistency changes (same error for same condition across plug-ins) - Numerous bug fixes * Thu Jul 3 2014 Tony Asleson 0.1.0-1 - Release candidate for a 1.0.0 release - Optional data removed - Initiator only functions removed - Pool create from from volumes removed - Code directory structure updated - Target port listing added * Tue Feb 18 2014 Gris Ge 0.0.24-2 - Introduce a REST daemon(only for systemd yet). - Allowing enable/disable ibm_v7k plugin via 'rpmbuild --with ibm_v7k' or 'rpmbuild --without ibm_v7k'. - Fix the compile warning about data of changelog by changing 'Thu Nov 27 2013' to 'Wed Nov 27 2013'. 
* Thu Jan 30 2014 Tony Asleson 0.0.24-1 - Command line interface (CLI) re-factored and improved to be easier to use and more consistent, man pages have been updated - Command line output now has '-s, --script' for an additional way to output information for consumption in scripts - Command line option '-o' for retrieving optional/extended data for disks & pools - Pool creation/deleting in CLI & python API - Numerous small bug fixes - C API, added ability to list disks, list plugins and retrieve optional data for disks - SSL for SMI-S is more stringent on certificate checking for newer distributions, new URI option "no_ssl_verify=yes" to disable * Wed Nov 27 2013 Tony Asleson 0.0.23-1 - Addition of listing disks implemented for SMI-S and Ontap plugins (new, not in C library yet) - Add the ability to list currently installed and usable plug-ins - Verify return types are correct in python client calls - Added the ability to retrieve optional data (new, not in C library yet) - Visibility reductions for python code (somethings were public when should be private - Add calls to create/delete pools (new, not in C library yet) - Add missing initiator type for SAS - Improved vpd83 retrieval for SMI-S - Performance improvements for SMI-S plug-in - Numerous small bug fixes - Nstor plugin, additional testing and bug fixes - lsmd, added call to setgroups and enable full relo and PIE (ASLR) for security improvements - simulator state is now versioned - SCSI Unit Attention uevent handling * Mon Aug 12 2013 Tony Asleson 0.0.22-1 - Numerous code improvments/fixes - BZ 968384 - BZ 990577 * Tue Jul 16 2013 Tony Asleson 0.0.21-1 - Don't include IBM7K plugin for RHEL > 6 missing paramakio - IEC binary size handling - Functionality improvements for IBM V7K array - Workaround for python bug on F19 - Bugfix (BZ 968384) - Package plug-ins as separately in rpm packages * Fri May 24 2013 Tony Asleson 0.0.20-1 - Python library files now in separate rpm - Additional debug for plug-ins when 
exceptions occur - iSCSI CHAP support modified to handle both inbound and outbound authentication - VOLUME_THIN Added as new capability flag - IBM V7000 storage array support - NFS export support for targetd - EXPORT_CUSTOM_PATH added capability flag * Sat Apr 20 2013 Tony Asleson 0.0.19-1 - Improved E-Series array support - Ontap plug-in: improve performance with many Volumes - lsmcli: Number of corrections on handling unit specifiers - lsmcli: Correct stack track when stdout is written to while closed - Fix build to work with automake >= 1.12 * Thu Mar 7 2013 Tony Asleson 0.0.18-1 - lsmd: Re-written in C - Simplify fs_delete - Corrections for C client against Python plugin - Testing: Run cross language unit test too - Initial FS support for targetd plugin - Fix multi-arch python issues which prevent py and compiled py files from being identical on different arches * Thu Jan 31 2013 Tony Asleson 0.0.17-1 - Inconsistency corrections between C and Python API - Source code documentation updates - NexentaStor plug-in has been added * Wed Jan 2 2013 Tony Asleson 0.0.16-1 - lsmcli: Add confirmation prompt for data loss operations - lsmcli: Display enumerated values as text - lsmcli: Exit with 7 for --job-status when not complete - Fixed URI example to reference an existing plug-in - lsmcli: Retrieve plug-in desc. 
and version (lsmcli --plugin-info) - simc: Implement CHAP auth function (no-op) - lsmcli: Change check for determining if lsmd is running - Disable mirroring for SMI-S as it needs some re-work * Mon Nov 19 2012 Tony Asleson 0.0.15-1 - Pool parameter is optional when replicating a volume - Code improvements(Memory leak fix, lsmcli checks if lsmd is running) - Source code documentation updates - Ability to override simulator data storage location - make check target added to run unit tests * Fri Oct 19 2012 Tony Asleson 0.0.14-1 - test/cmdline.py added to automatically test what an array supports - Bug fixes (local plug-in execution, smi-s delete clone, code warnings) - targetd: (uri syntax consistency change, initialization code change) - Pool id added to volume information - lsmcli: Added --replicate-volume-range-block-size to retrieve replicated block size * Fri Sep 28 2012 Tony Asleson (Red Hat) 0.0.13-1 - targetD Feature adds/fixes for initiators, init_granted_to_volume, volumes_accessible_by_init, initiator_grant, initiator_revoke - SMI-S added compatibility with CIM_StorageConfigurationService - SMI-S bug fixes/changes to support XIV arrays (Basic functionality verified) - SMI-S Proxy layer added to allow different internal implementations of smi-s client - Added missing version information for C plug-in API - lsmcli URI can be stored in file .lsmcli in users home directory * Fri Sep 07 2012 Tony Asleson (Red Hat) 0.0.12-1 - SMI-S plug-in enhancements (Detach before delete, bug fixes for eSeries) - Added version specifier for non-opaque structs in plug-in callback interface - Documentation updates (doxygen, man pages) - Ontap plug-in: support timeout values - lsmcli, return back async. values other than volumes when using --job-status * Mon Aug 13 2012 Tony Asleson 0.0.11-1 - SMI-S fixes and improvements (WaitForCopyState, _get_class_instance) - Methods for arrays that don't support access groups to grant access for luns to initiators etc. 
- ISCSI Chap authentication - System level status field for overall array status - targetd updates for mapping targets to initiators - Simulator updates (python & C) - Removed tog-pegasus dependency (SMI-S is python plug-in) - Removed lsmVolumeStatus as it was implemented and redundant - initscript, check for /var/run and create if missing * Fri Jul 20 2012 Tony Asleson 0.0.10-1 - Numerous updates and re-name for plug-in targetd_lsmplugin - targetd_lsmplugin included in release - Memory leak fixes and improved unit tests - Initial capability query support, implemented for all plug-ins - Flags variable added to API calls, (Warning: C API/ABI breakage, python unaffected) - Bug fixes for NetApp ontap plug-in - SMI-S bug fixes (initiator listing and replication, mode and sync types) - Added ability to specify mirroring async or sync for replication - Added version header file to allow client version header checks - Simulator plug-in written in C, simc_lsmplugin is available * Tue Jun 12 2012 Tony Asleson 0.0.9-1 - Initial checkin of lio plug-in - System filtering via URI (smispy) - Error code mapping (ontap) - Fixed build so same build tarball is used for all binaries * Mon Jun 4 2012 Tony Asleson 0.0.8-1 - Make building of SMI-S CPP plugin optional - Add pkg-config file - SMIS: Fix exception while retrieving Volumes - SMIS: Fix exception while retrieving Volumes - lsm: Add package imports - Make Smis class available in lsm python package - Add option to disable building C unit test - Make simulator classes available in lsm python package - Make ontap class available in lsm python package - Changes to support building on Fedora 17 (v2) - Spec. file updates from feedback from T. 
Callaway (spot) - F17 linker symbol visibility correction - Remove unneeded build dependencies and cleaned up some warnings - C Updates, client C library feature parity with python * Fri May 11 2012 Tony Asleson 0.0.7-1 - Bug fix for smi-s constants - Display formatting improvements - Added header option for lsmcli - Improved version handling for builds - Made terminology consistent - Ability to list visibility for access groups and volumes - Simulator plug-in fully supports all block operations - Added support for multiple systems with a single plug-in instance * Fri Apr 20 2012 Tony Asleson 0.0.6-1 - Documentation improvements (man & source code) - Support for access groups - Unified spec files Fedora/RHEL - Package version auto generate - Rpm target added to make - Bug fix for missing optional property on volume retrieval (smispy plug-in) * Fri Apr 6 2012 Tony Asleson 0.0.5-1 - Spec file clean-up improvements - Async. operation added to lsmcli and ability to check on job status - Sub volume replication support - Ability to check for child dependencies on VOLUMES, FS and files - SMI-S Bug fixes and improvements * Mon Mar 26 2012 Tony Asleson 0.0.4-1 - Restore from snapshot - Job identifiers string instead of integer - Updated license address * Wed Mar 14 2012 Tony Asleson 0.0.3-1 - Changes to installer, daemon uid, gid, /var/run/lsm/* - NFS improvements and bug fixes - Python library clean up (rpmlint errors) * Sun Mar 11 2012 Tony Asleson 0.0.2-1 - Added NetApp native plugin * Mon Feb 6 2012 Tony Asleson 0.0.1alpha-1 - Initial version of package libstoragemgmt-1.2.3/build-aux/0000775000175000017500000000000012542455463013444 500000000000000libstoragemgmt-1.2.3/build-aux/missing0000755000175000017500000001533112540163524014754 00000000000000#! /bin/sh # Common wrapper for a few potentially missing GNU programs. scriptversion=2012-06-26.16; # UTC # Copyright (C) 1996-2013 Free Software Foundation, Inc. # Originally written by Fran,cois Pinard , 1996. 
# This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. if test $# -eq 0; then echo 1>&2 "Try '$0 --help' for more information" exit 1 fi case $1 in --is-lightweight) # Used by our autoconf macros to check whether the available missing # script is modern enough. exit 0 ;; --run) # Back-compat with the calling convention used by older automake. shift ;; -h|--h|--he|--hel|--help) echo "\ $0 [OPTION]... PROGRAM [ARGUMENT]... Run 'PROGRAM [ARGUMENT]...', returning a proper advice when this fails due to PROGRAM being missing or too old. Options: -h, --help display this help and exit -v, --version output version information and exit Supported PROGRAM values: aclocal autoconf autoheader autom4te automake makeinfo bison yacc flex lex help2man Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and 'g' are ignored when checking the name. Send bug reports to ." exit $? ;; -v|--v|--ve|--ver|--vers|--versi|--versio|--version) echo "missing $scriptversion (GNU Automake)" exit $? ;; -*) echo 1>&2 "$0: unknown '$1' option" echo 1>&2 "Try '$0 --help' for more information" exit 1 ;; esac # Run the given program, remember its exit status. "$@"; st=$? # If it succeeded, we are done. 
test $st -eq 0 && exit 0 # Also exit now if we it failed (or wasn't found), and '--version' was # passed; such an option is passed most likely to detect whether the # program is present and works. case $2 in --version|--help) exit $st;; esac # Exit code 63 means version mismatch. This often happens when the user # tries to use an ancient version of a tool on a file that requires a # minimum version. if test $st -eq 63; then msg="probably too old" elif test $st -eq 127; then # Program was missing. msg="missing on your system" else # Program was found and executed, but failed. Give up. exit $st fi perl_URL=http://www.perl.org/ flex_URL=http://flex.sourceforge.net/ gnu_software_URL=http://www.gnu.org/software program_details () { case $1 in aclocal|automake) echo "The '$1' program is part of the GNU Automake package:" echo "<$gnu_software_URL/automake>" echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:" echo "<$gnu_software_URL/autoconf>" echo "<$gnu_software_URL/m4/>" echo "<$perl_URL>" ;; autoconf|autom4te|autoheader) echo "The '$1' program is part of the GNU Autoconf package:" echo "<$gnu_software_URL/autoconf/>" echo "It also requires GNU m4 and Perl in order to run:" echo "<$gnu_software_URL/m4/>" echo "<$perl_URL>" ;; esac } give_advice () { # Normalize program name to check for. normalized_program=`echo "$1" | sed ' s/^gnu-//; t s/^gnu//; t s/^g//; t'` printf '%s\n' "'$1' is $msg." configure_deps="'configure.ac' or m4 files included by 'configure.ac'" case $normalized_program in autoconf*) echo "You should only need it if you modified 'configure.ac'," echo "or m4 files included by it." program_details 'autoconf' ;; autoheader*) echo "You should only need it if you modified 'acconfig.h' or" echo "$configure_deps." program_details 'autoheader' ;; automake*) echo "You should only need it if you modified 'Makefile.am' or" echo "$configure_deps." 
program_details 'automake' ;; aclocal*) echo "You should only need it if you modified 'acinclude.m4' or" echo "$configure_deps." program_details 'aclocal' ;; autom4te*) echo "You might have modified some maintainer files that require" echo "the 'automa4te' program to be rebuilt." program_details 'autom4te' ;; bison*|yacc*) echo "You should only need it if you modified a '.y' file." echo "You may want to install the GNU Bison package:" echo "<$gnu_software_URL/bison/>" ;; lex*|flex*) echo "You should only need it if you modified a '.l' file." echo "You may want to install the Fast Lexical Analyzer package:" echo "<$flex_URL>" ;; help2man*) echo "You should only need it if you modified a dependency" \ "of a man page." echo "You may want to install the GNU Help2man package:" echo "<$gnu_software_URL/help2man/>" ;; makeinfo*) echo "You should only need it if you modified a '.texi' file, or" echo "any other file indirectly affecting the aspect of the manual." echo "You might want to install the Texinfo package:" echo "<$gnu_software_URL/texinfo/>" echo "The spurious makeinfo call might also be the consequence of" echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might" echo "want to install GNU make:" echo "<$gnu_software_URL/make/>" ;; *) echo "You might have modified some files without having the proper" echo "tools for further handling them. Check the 'README' file, it" echo "often tells you about the needed prerequisites for installing" echo "this package. You may also peek at any GNU archive site, in" echo "case some other package contains this missing '$1' program." ;; esac } give_advice "$1" | sed -e '1s/^/WARNING: /' \ -e '2,$s/^/ /' >&2 # Propagate the correct exit status (expected to be 127 for a program # not found, 63 for a program that failed due to version mismatch). 
exit $st # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: libstoragemgmt-1.2.3/build-aux/config.sub0000755000175000017500000010531512540163524015342 00000000000000#! /bin/sh # Configuration validation subroutine script. # Copyright 1992-2013 Free Software Foundation, Inc. timestamp='2013-04-24' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # Please send patches with a ChangeLog entry to config-patches@gnu.org. # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. # If it is invalid, we print an error message on stderr and exit with code 1. # Otherwise, we print the canonical config type on stdout and succeed. 
# You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases # that are meaningful with *any* GNU software. # Each package is responsible for reporting which valid configurations # it does not support. The user should be able to distinguish # a failure to support a valid configuration from a meaningless # configuration. # The goal of this file is to map all the various variations of a given # machine specification into a single specification in the form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM # or in some cases, the newer four-part form: # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # It is wrong to echo any other type of specification. me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] CPU-MFR-OPSYS $0 [OPTION] ALIAS Canonicalize a configuration name. Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.sub ($timestamp) Copyright 1992-2013 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" exit 1 ;; *local*) # First pass through any local machine types. 
echo $1 exit ;; * ) break ;; esac done case $# in 0) echo "$me: missing argument$help" >&2 exit 1;; 1) ;; *) echo "$me: too many arguments$help" >&2 exit 1;; esac # Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). # Here we must recognize all the valid KERNEL-OS combinations. maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` case $maybe_os in nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ knetbsd*-gnu* | netbsd*-gnu* | \ kopensolaris*-gnu* | \ storm-chaos* | os2-emx* | rtmk-nova*) os=-$maybe_os basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` ;; android-linux) os=-linux-android basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown ;; *) basic_machine=`echo $1 | sed 's/-[^-]*$//'` if [ $basic_machine != $1 ] then os=`echo $1 | sed 's/.*-/-/'` else os=; fi ;; esac ### Let's recognize common machines as not being operating systems so ### that things like config.sub decstation-3100 work. We also ### recognize some manufacturers as not being operating systems, so we ### can provide default operating systems below. case $os in -sun*os*) # Prevent following clause from handling this invalid input. 
;; -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ -apple | -axis | -knuth | -cray | -microblaze*) os= basic_machine=$1 ;; -bluegene*) os=-cnk ;; -sim | -cisco | -oki | -wec | -winbond) os= basic_machine=$1 ;; -scout) ;; -wrs) os=-vxworks basic_machine=$1 ;; -chorusos*) os=-chorusos basic_machine=$1 ;; -chorusrdb) os=-chorusrdb basic_machine=$1 ;; -hiux*) os=-hiuxwe2 ;; -sco6) os=-sco5v6 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco5) os=-sco3.2v5 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco4) os=-sco3.2v4 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco3.2.[4-9]*) os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco3.2v[4-9]*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco5v6*) # Don't forget version if it is 3.2v4 or newer. basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -sco*) os=-sco3.2v2 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -udk*) basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -isc) os=-isc2.2 basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -clix*) basic_machine=clipper-intergraph ;; -isc*) basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` ;; -lynx*178) os=-lynxos178 ;; -lynx*5) os=-lynxos5 ;; -lynx*) os=-lynxos ;; -ptx*) basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` ;; -windowsnt*) os=`echo $os | sed -e 's/windowsnt/winnt/'` ;; -psos*) os=-psos ;; -mint | -mint[0-9]*) basic_machine=m68k-atari os=-mint ;; esac # Decode aliases for certain CPU-COMPANY combinations. 
case $basic_machine in # Recognize the basic CPU types without company name. # Some are omitted here because they have special meanings below. 1750a | 580 \ | a29k \ | aarch64 | aarch64_be \ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ | am33_2.0 \ | arc | arceb \ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ | avr | avr32 \ | be32 | be64 \ | bfin \ | c4x | clipper \ | d10v | d30v | dlx | dsp16xx \ | epiphany \ | fido | fr30 | frv \ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | hexagon \ | i370 | i860 | i960 | ia64 \ | ip2k | iq2000 \ | le32 | le64 \ | lm32 \ | m32c | m32r | m32rle | m68000 | m68k | m88k \ | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ | mips | mipsbe | mipseb | mipsel | mipsle \ | mips16 \ | mips64 | mips64el \ | mips64octeon | mips64octeonel \ | mips64orion | mips64orionel \ | mips64r5900 | mips64r5900el \ | mips64vr | mips64vrel \ | mips64vr4100 | mips64vr4100el \ | mips64vr4300 | mips64vr4300el \ | mips64vr5000 | mips64vr5000el \ | mips64vr5900 | mips64vr5900el \ | mipsisa32 | mipsisa32el \ | mipsisa32r2 | mipsisa32r2el \ | mipsisa64 | mipsisa64el \ | mipsisa64r2 | mipsisa64r2el \ | mipsisa64sb1 | mipsisa64sb1el \ | mipsisa64sr71k | mipsisa64sr71kel \ | mipsr5900 | mipsr5900el \ | mipstx39 | mipstx39el \ | mn10200 | mn10300 \ | moxie \ | mt \ | msp430 \ | nds32 | nds32le | nds32be \ | nios | nios2 | nios2eb | nios2el \ | ns16k | ns32k \ | open8 \ | or1k | or32 \ | pdp10 | pdp11 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle \ | pyramid \ | rl78 | rx \ | score \ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ | sh64 | sh64le \ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ | spu \ | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ 
| ubicom32 \ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ | we32k \ | x86 | xc16x | xstormy16 | xtensa \ | z8k | z80) basic_machine=$basic_machine-unknown ;; c54x) basic_machine=tic54x-unknown ;; c55x) basic_machine=tic55x-unknown ;; c6x) basic_machine=tic6x-unknown ;; m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip) basic_machine=$basic_machine-unknown os=-none ;; m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) ;; ms1) basic_machine=mt-unknown ;; strongarm | thumb | xscale) basic_machine=arm-unknown ;; xgate) basic_machine=$basic_machine-unknown os=-none ;; xscaleeb) basic_machine=armeb-unknown ;; xscaleel) basic_machine=armel-unknown ;; # We use `pc' rather than `unknown' # because (1) that's what they normally are, and # (2) the word "unknown" tends to confuse beginning users. i*86 | x86_64) basic_machine=$basic_machine-pc ;; # Object if more than one company name word. *-*-*) echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 exit 1 ;; # Recognize the basic CPU types with company name. 
580-* \ | a29k-* \ | aarch64-* | aarch64_be-* \ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ | avr-* | avr32-* \ | be32-* | be64-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* \ | clipper-* | craynv-* | cydra-* \ | d10v-* | d30v-* | dlx-* \ | elxsi-* \ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ | h8300-* | h8500-* \ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ | hexagon-* \ | i*86-* | i860-* | i960-* | ia64-* \ | ip2k-* | iq2000-* \ | le32-* | le64-* \ | lm32-* \ | m32c-* | m32r-* | m32rle-* \ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ | microblaze-* | microblazeel-* \ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ | mips16-* \ | mips64-* | mips64el-* \ | mips64octeon-* | mips64octeonel-* \ | mips64orion-* | mips64orionel-* \ | mips64r5900-* | mips64r5900el-* \ | mips64vr-* | mips64vrel-* \ | mips64vr4100-* | mips64vr4100el-* \ | mips64vr4300-* | mips64vr4300el-* \ | mips64vr5000-* | mips64vr5000el-* \ | mips64vr5900-* | mips64vr5900el-* \ | mipsisa32-* | mipsisa32el-* \ | mipsisa32r2-* | mipsisa32r2el-* \ | mipsisa64-* | mipsisa64el-* \ | mipsisa64r2-* | mipsisa64r2el-* \ | mipsisa64sb1-* | mipsisa64sb1el-* \ | mipsisa64sr71k-* | mipsisa64sr71kel-* \ | mipsr5900-* | mipsr5900el-* \ | mipstx39-* | mipstx39el-* \ | mmix-* \ | mt-* \ | msp430-* \ | nds32-* | nds32le-* | nds32be-* \ | nios-* | nios2-* | nios2eb-* | nios2el-* \ | none-* | np1-* | ns16k-* | ns32k-* \ | open8-* \ | orion-* \ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ | pyramid-* \ | rl78-* | romp-* | rs6000-* | rx-* \ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ | shle-* | 
sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ | sparclite-* \ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ | tahoe-* \ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ | tile*-* \ | tron-* \ | ubicom32-* \ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ | vax-* \ | we32k-* \ | x86-* | x86_64-* | xc16x-* | xps100-* \ | xstormy16-* | xtensa*-* \ | ymp-* \ | z8k-* | z80-*) ;; # Recognize the basic CPU types without company name, with glob match. xtensa*) basic_machine=$basic_machine-unknown ;; # Recognize the various machine names and aliases which stand # for a CPU type and a company and sometimes even an OS. 386bsd) basic_machine=i386-unknown os=-bsd ;; 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) basic_machine=m68000-att ;; 3b*) basic_machine=we32k-att ;; a29khif) basic_machine=a29k-amd os=-udi ;; abacus) basic_machine=abacus-unknown ;; adobe68k) basic_machine=m68010-adobe os=-scout ;; alliant | fx80) basic_machine=fx80-alliant ;; altos | altos3068) basic_machine=m68k-altos ;; am29k) basic_machine=a29k-none os=-bsd ;; amd64) basic_machine=x86_64-pc ;; amd64-*) basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; amdahl) basic_machine=580-amdahl os=-sysv ;; amiga | amiga-*) basic_machine=m68k-unknown ;; amigaos | amigados) basic_machine=m68k-unknown os=-amigaos ;; amigaunix | amix) basic_machine=m68k-unknown os=-sysv4 ;; apollo68) basic_machine=m68k-apollo os=-sysv ;; apollo68bsd) basic_machine=m68k-apollo os=-bsd ;; aros) basic_machine=i386-pc os=-aros ;; aux) basic_machine=m68k-apple os=-aux ;; balance) basic_machine=ns32k-sequent os=-dynix ;; blackfin) basic_machine=bfin-unknown os=-linux ;; blackfin-*) basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; bluegene*) basic_machine=powerpc-ibm os=-cnk ;; c54x-*) basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` ;; c55x-*) 
basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` ;; c6x-*) basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` ;; c90) basic_machine=c90-cray os=-unicos ;; cegcc) basic_machine=arm-unknown os=-cegcc ;; convex-c1) basic_machine=c1-convex os=-bsd ;; convex-c2) basic_machine=c2-convex os=-bsd ;; convex-c32) basic_machine=c32-convex os=-bsd ;; convex-c34) basic_machine=c34-convex os=-bsd ;; convex-c38) basic_machine=c38-convex os=-bsd ;; cray | j90) basic_machine=j90-cray os=-unicos ;; craynv) basic_machine=craynv-cray os=-unicosmp ;; cr16 | cr16-*) basic_machine=cr16-unknown os=-elf ;; crds | unos) basic_machine=m68k-crds ;; crisv32 | crisv32-* | etraxfs*) basic_machine=crisv32-axis ;; cris | cris-* | etrax*) basic_machine=cris-axis ;; crx) basic_machine=crx-unknown os=-elf ;; da30 | da30-*) basic_machine=m68k-da30 ;; decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) basic_machine=mips-dec ;; decsystem10* | dec10*) basic_machine=pdp10-dec os=-tops10 ;; decsystem20* | dec20*) basic_machine=pdp10-dec os=-tops20 ;; delta | 3300 | motorola-3300 | motorola-delta \ | 3300-motorola | delta-motorola) basic_machine=m68k-motorola ;; delta88) basic_machine=m88k-motorola os=-sysv3 ;; dicos) basic_machine=i686-pc os=-dicos ;; djgpp) basic_machine=i586-pc os=-msdosdjgpp ;; dpx20 | dpx20-*) basic_machine=rs6000-bull os=-bosx ;; dpx2* | dpx2*-bull) basic_machine=m68k-bull os=-sysv3 ;; ebmon29k) basic_machine=a29k-amd os=-ebmon ;; elxsi) basic_machine=elxsi-elxsi os=-bsd ;; encore | umax | mmax) basic_machine=ns32k-encore ;; es1800 | OSE68k | ose68k | ose | OSE) basic_machine=m68k-ericsson os=-ose ;; fx2800) basic_machine=i860-alliant ;; genix) basic_machine=ns32k-ns ;; gmicro) basic_machine=tron-gmicro os=-sysv ;; go32) basic_machine=i386-pc os=-go32 ;; h3050r* | hiux*) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; h8300hms) basic_machine=h8300-hitachi os=-hms ;; h8300xray) basic_machine=h8300-hitachi os=-xray ;; h8500hms) 
basic_machine=h8500-hitachi os=-hms ;; harris) basic_machine=m88k-harris os=-sysv3 ;; hp300-*) basic_machine=m68k-hp ;; hp300bsd) basic_machine=m68k-hp os=-bsd ;; hp300hpux) basic_machine=m68k-hp os=-hpux ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k2[0-9][0-9] | hp9k31[0-9]) basic_machine=m68000-hp ;; hp9k3[2-9][0-9]) basic_machine=m68k-hp ;; hp9k6[0-9][0-9] | hp6[0-9][0-9]) basic_machine=hppa1.0-hp ;; hp9k7[0-79][0-9] | hp7[0-79][0-9]) basic_machine=hppa1.1-hp ;; hp9k78[0-9] | hp78[0-9]) # FIXME: really hppa2.0-hp basic_machine=hppa1.1-hp ;; hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) # FIXME: really hppa2.0-hp basic_machine=hppa1.1-hp ;; hp9k8[0-9][13679] | hp8[0-9][13679]) basic_machine=hppa1.1-hp ;; hp9k8[0-9][0-9] | hp8[0-9][0-9]) basic_machine=hppa1.0-hp ;; hppa-next) os=-nextstep3 ;; hppaosf) basic_machine=hppa1.1-hp os=-osf ;; hppro) basic_machine=hppa1.1-hp os=-proelf ;; i370-ibm* | ibm*) basic_machine=i370-ibm ;; i*86v32) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv32 ;; i*86v4*) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv4 ;; i*86v) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-sysv ;; i*86sol2) basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` os=-solaris2 ;; i386mach) basic_machine=i386-mach os=-mach ;; i386-vsta | vsta) basic_machine=i386-unknown os=-vsta ;; iris | iris4d) basic_machine=mips-sgi case $os in -irix*) ;; *) os=-irix4 ;; esac ;; isi68 | isi) basic_machine=m68k-isi os=-sysv ;; m68knommu) basic_machine=m68k-unknown os=-linux ;; m68knommu-*) basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` os=-linux ;; m88k-omron*) basic_machine=m88k-omron ;; magnum | m3230) basic_machine=mips-mips os=-sysv ;; merlin) basic_machine=ns32k-utek os=-sysv ;; microblaze*) basic_machine=microblaze-xilinx ;; mingw64) basic_machine=x86_64-pc os=-mingw64 ;; mingw32) basic_machine=i386-pc os=-mingw32 ;; mingw32ce) basic_machine=arm-unknown os=-mingw32ce ;; 
miniframe) basic_machine=m68000-convergent ;; *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) basic_machine=m68k-atari os=-mint ;; mips3*-*) basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` ;; mips3*) basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown ;; monitor) basic_machine=m68k-rom68k os=-coff ;; morphos) basic_machine=powerpc-unknown os=-morphos ;; msdos) basic_machine=i386-pc os=-msdos ;; ms1-*) basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` ;; msys) basic_machine=i386-pc os=-msys ;; mvs) basic_machine=i370-ibm os=-mvs ;; nacl) basic_machine=le32-unknown os=-nacl ;; ncr3000) basic_machine=i486-ncr os=-sysv4 ;; netbsd386) basic_machine=i386-unknown os=-netbsd ;; netwinder) basic_machine=armv4l-rebel os=-linux ;; news | news700 | news800 | news900) basic_machine=m68k-sony os=-newsos ;; news1000) basic_machine=m68030-sony os=-newsos ;; news-3600 | risc-news) basic_machine=mips-sony os=-newsos ;; necv70) basic_machine=v70-nec os=-sysv ;; next | m*-next ) basic_machine=m68k-next case $os in -nextstep* ) ;; -ns2*) os=-nextstep2 ;; *) os=-nextstep3 ;; esac ;; nh3000) basic_machine=m68k-harris os=-cxux ;; nh[45]000) basic_machine=m88k-harris os=-cxux ;; nindy960) basic_machine=i960-intel os=-nindy ;; mon960) basic_machine=i960-intel os=-mon960 ;; nonstopux) basic_machine=mips-compaq os=-nonstopux ;; np1) basic_machine=np1-gould ;; neo-tandem) basic_machine=neo-tandem ;; nse-tandem) basic_machine=nse-tandem ;; nsr-tandem) basic_machine=nsr-tandem ;; op50n-* | op60c-*) basic_machine=hppa1.1-oki os=-proelf ;; openrisc | openrisc-*) basic_machine=or32-unknown ;; os400) basic_machine=powerpc-ibm os=-os400 ;; OSE68000 | ose68000) basic_machine=m68000-ericsson os=-ose ;; os68k) basic_machine=m68k-none os=-os68k ;; pa-hitachi) basic_machine=hppa1.1-hitachi os=-hiuxwe2 ;; paragon) basic_machine=i860-intel os=-osf ;; parisc) basic_machine=hppa-unknown os=-linux ;; parisc-*) basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` 
os=-linux ;; pbd) basic_machine=sparc-tti ;; pbb) basic_machine=m68k-tti ;; pc532 | pc532-*) basic_machine=ns32k-pc532 ;; pc98) basic_machine=i386-pc ;; pc98-*) basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentium | p5 | k5 | k6 | nexgen | viac3) basic_machine=i586-pc ;; pentiumpro | p6 | 6x86 | athlon | athlon_*) basic_machine=i686-pc ;; pentiumii | pentium2 | pentiumiii | pentium3) basic_machine=i686-pc ;; pentium4) basic_machine=i786-pc ;; pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentiumpro-* | p6-* | 6x86-* | athlon-*) basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pentium4-*) basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` ;; pn) basic_machine=pn-gould ;; power) basic_machine=power-ibm ;; ppc | ppcbe) basic_machine=powerpc-unknown ;; ppc-* | ppcbe-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppcle | powerpclittle | ppc-le | powerpc-little) basic_machine=powerpcle-unknown ;; ppcle-* | powerpclittle-*) basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppc64) basic_machine=powerpc64-unknown ;; ppc64-* | ppc64p7-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ppc64le | powerpc64little | ppc64-le | powerpc64-little) basic_machine=powerpc64le-unknown ;; ppc64le-* | powerpc64little-*) basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` ;; ps2) basic_machine=i386-ibm ;; pw32) basic_machine=i586-unknown os=-pw32 ;; rdos | rdos64) basic_machine=x86_64-pc os=-rdos ;; rdos32) basic_machine=i386-pc os=-rdos ;; rom68k) basic_machine=m68k-rom68k os=-coff ;; rm[46]00) basic_machine=mips-siemens ;; rtpc | rtpc-*) basic_machine=romp-ibm ;; s390 | s390-*) basic_machine=s390-ibm ;; s390x | s390x-*) basic_machine=s390x-ibm ;; sa29200) basic_machine=a29k-amd 
os=-udi ;; sb1) basic_machine=mipsisa64sb1-unknown ;; sb1el) basic_machine=mipsisa64sb1el-unknown ;; sde) basic_machine=mipsisa32-sde os=-elf ;; sei) basic_machine=mips-sei os=-seiux ;; sequent) basic_machine=i386-sequent ;; sh) basic_machine=sh-hitachi os=-hms ;; sh5el) basic_machine=sh5le-unknown ;; sh64) basic_machine=sh64-unknown ;; sparclite-wrs | simso-wrs) basic_machine=sparclite-wrs os=-vxworks ;; sps7) basic_machine=m68k-bull os=-sysv2 ;; spur) basic_machine=spur-unknown ;; st2000) basic_machine=m68k-tandem ;; stratus) basic_machine=i860-stratus os=-sysv4 ;; strongarm-* | thumb-*) basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` ;; sun2) basic_machine=m68000-sun ;; sun2os3) basic_machine=m68000-sun os=-sunos3 ;; sun2os4) basic_machine=m68000-sun os=-sunos4 ;; sun3os3) basic_machine=m68k-sun os=-sunos3 ;; sun3os4) basic_machine=m68k-sun os=-sunos4 ;; sun4os3) basic_machine=sparc-sun os=-sunos3 ;; sun4os4) basic_machine=sparc-sun os=-sunos4 ;; sun4sol2) basic_machine=sparc-sun os=-solaris2 ;; sun3 | sun3-*) basic_machine=m68k-sun ;; sun4) basic_machine=sparc-sun ;; sun386 | sun386i | roadrunner) basic_machine=i386-sun ;; sv1) basic_machine=sv1-cray os=-unicos ;; symmetry) basic_machine=i386-sequent os=-dynix ;; t3e) basic_machine=alphaev5-cray os=-unicos ;; t90) basic_machine=t90-cray os=-unicos ;; tile*) basic_machine=$basic_machine-unknown os=-linux-gnu ;; tx39) basic_machine=mipstx39-unknown ;; tx39el) basic_machine=mipstx39el-unknown ;; toad1) basic_machine=pdp10-xkl os=-tops20 ;; tower | tower-32) basic_machine=m68k-ncr ;; tpf) basic_machine=s390x-ibm os=-tpf ;; udi29k) basic_machine=a29k-amd os=-udi ;; ultra3) basic_machine=a29k-nyu os=-sym1 ;; v810 | necv810) basic_machine=v810-nec os=-none ;; vaxv) basic_machine=vax-dec os=-sysv ;; vms) basic_machine=vax-dec os=-vms ;; vpp*|vx|vx-*) basic_machine=f301-fujitsu ;; vxworks960) basic_machine=i960-wrs os=-vxworks ;; vxworks68) basic_machine=m68k-wrs os=-vxworks ;; vxworks29k) 
basic_machine=a29k-wrs os=-vxworks ;; w65*) basic_machine=w65-wdc os=-none ;; w89k-*) basic_machine=hppa1.1-winbond os=-proelf ;; xbox) basic_machine=i686-pc os=-mingw32 ;; xps | xps100) basic_machine=xps100-honeywell ;; xscale-* | xscalee[bl]-*) basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` ;; ymp) basic_machine=ymp-cray os=-unicos ;; z8k-*-coff) basic_machine=z8k-unknown os=-sim ;; z80-*-coff) basic_machine=z80-unknown os=-sim ;; none) basic_machine=none-none os=-none ;; # Here we handle the default manufacturer of certain CPU types. It is in # some cases the only manufacturer, in others, it is the most popular. w89k) basic_machine=hppa1.1-winbond ;; op50n) basic_machine=hppa1.1-oki ;; op60c) basic_machine=hppa1.1-oki ;; romp) basic_machine=romp-ibm ;; mmix) basic_machine=mmix-knuth ;; rs6000) basic_machine=rs6000-ibm ;; vax) basic_machine=vax-dec ;; pdp10) # there are many clones, so DEC is not a safe bet basic_machine=pdp10-unknown ;; pdp11) basic_machine=pdp11-dec ;; we32k) basic_machine=we32k-att ;; sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) basic_machine=sh-unknown ;; sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) basic_machine=sparc-sun ;; cydra) basic_machine=cydra-cydrome ;; orion) basic_machine=orion-highlevel ;; orion105) basic_machine=clipper-highlevel ;; mac | mpw | mac-mpw) basic_machine=m68k-apple ;; pmac | pmac-mpw) basic_machine=powerpc-apple ;; *-unknown) # Make sure to match an already-canonicalized machine name. ;; *) echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 exit 1 ;; esac # Here we canonicalize certain aliases for manufacturers. case $basic_machine in *-digital*) basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` ;; *-commodore*) basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` ;; *) ;; esac # Decode manufacturer-specific aliases for certain operating systems. 
if [ x"$os" != x"" ] then case $os in # First match some system type aliases # that might get confused with valid system types. # -solaris* is a basic system type, with this one exception. -auroraux) os=-auroraux ;; -solaris1 | -solaris1.*) os=`echo $os | sed -e 's|solaris1|sunos4|'` ;; -solaris) os=-solaris2 ;; -svr4*) os=-sysv4 ;; -unixware*) os=-sysv4.2uw ;; -gnu/linux*) os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` ;; # First accept the basic system types. # The portable systems comes first. # Each alternative MUST END IN A *, to match a version number. # -sysv* is not here because it comes later, after sysvr4. -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ | -sym* | -kopensolaris* | -plan9* \ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ | -aos* | -aros* \ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ | -bitrig* | -openbsd* | -solidbsd* \ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ | -chorusos* | -chorusrdb* | -cegcc* \ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ | -linux-newlib* | -linux-musl* | -linux-uclibc* \ | -uxpv* | -beos* | -mpeix* | -udk* \ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ | -powermax* | -dnix* | -nx6 | -nx7 | 
-sei* | -dragonfly* \ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) # Remember, each alternative MUST END IN *, to match a version number. ;; -qnx*) case $basic_machine in x86-* | i*86-*) ;; *) os=-nto$os ;; esac ;; -nto-qnx*) ;; -nto*) os=`echo $os | sed -e 's|nto|nto-qnx|'` ;; -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) ;; -mac*) os=`echo $os | sed -e 's|mac|macos|'` ;; -linux-dietlibc) os=-linux-dietlibc ;; -linux*) os=`echo $os | sed -e 's|linux|linux-gnu|'` ;; -sunos5*) os=`echo $os | sed -e 's|sunos5|solaris2|'` ;; -sunos6*) os=`echo $os | sed -e 's|sunos6|solaris3|'` ;; -opened*) os=-openedition ;; -os400*) os=-os400 ;; -wince*) os=-wince ;; -osfrose*) os=-osfrose ;; -osf*) os=-osf ;; -utek*) os=-bsd ;; -dynix*) os=-bsd ;; -acis*) os=-aos ;; -atheos*) os=-atheos ;; -syllable*) os=-syllable ;; -386bsd) os=-bsd ;; -ctix* | -uts*) os=-sysv ;; -nova*) os=-rtmk-nova ;; -ns2 ) os=-nextstep2 ;; -nsk*) os=-nsk ;; # Preserve the version number of sinix5. -sinix5.*) os=`echo $os | sed -e 's|sinix|sysv|'` ;; -sinix*) os=-sysv4 ;; -tpf*) os=-tpf ;; -triton*) os=-sysv3 ;; -oss*) os=-sysv3 ;; -svr4) os=-sysv4 ;; -svr3) os=-sysv3 ;; -sysvr4) os=-sysv4 ;; # This must come after -sysvr4. -sysv*) ;; -ose*) os=-ose ;; -es1800*) os=-ose ;; -xenix) os=-xenix ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) os=-mint ;; -aros*) os=-aros ;; -zvmoe) os=-zvmoe ;; -dicos*) os=-dicos ;; -nacl*) ;; -none) ;; *) # Get rid of the `-' at the beginning of $os. os=`echo $os | sed 's/[^-]*-//'` echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 exit 1 ;; esac else # Here we handle the default operating systems that come with various machines. # The value should be what the vendor currently ships out the door with their # machine or put another way, the most popular os provided with the machine. 
# Note that if you're going to try to match "-MANUFACTURER" here (say, # "-sun"), then you have to tell the case statement up towards the top # that MANUFACTURER isn't an operating system. Otherwise, code above # will signal an error saying that MANUFACTURER isn't an operating # system, and we'll never get to this point. case $basic_machine in score-*) os=-elf ;; spu-*) os=-elf ;; *-acorn) os=-riscix1.2 ;; arm*-rebel) os=-linux ;; arm*-semi) os=-aout ;; c4x-* | tic4x-*) os=-coff ;; hexagon-*) os=-elf ;; tic54x-*) os=-coff ;; tic55x-*) os=-coff ;; tic6x-*) os=-coff ;; # This must come before the *-dec entry. pdp10-*) os=-tops20 ;; pdp11-*) os=-none ;; *-dec | vax-*) os=-ultrix4.2 ;; m68*-apollo) os=-domain ;; i386-sun) os=-sunos4.0.2 ;; m68000-sun) os=-sunos3 ;; m68*-cisco) os=-aout ;; mep-*) os=-elf ;; mips*-cisco) os=-elf ;; mips*-*) os=-elf ;; or1k-*) os=-elf ;; or32-*) os=-coff ;; *-tti) # must be before sparc entry or we get the wrong os. os=-sysv3 ;; sparc-* | *-sun) os=-sunos4.1.1 ;; *-be) os=-beos ;; *-haiku) os=-haiku ;; *-ibm) os=-aix ;; *-knuth) os=-mmixware ;; *-wec) os=-proelf ;; *-winbond) os=-proelf ;; *-oki) os=-proelf ;; *-hp) os=-hpux ;; *-hitachi) os=-hiux ;; i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) os=-sysv ;; *-cbm) os=-amigaos ;; *-dg) os=-dgux ;; *-dolphin) os=-sysv3 ;; m68k-ccur) os=-rtu ;; m88k-omron*) os=-luna ;; *-next ) os=-nextstep ;; *-sequent) os=-ptx ;; *-crds) os=-unos ;; *-ns) os=-genix ;; i370-*) os=-mvs ;; *-next) os=-nextstep3 ;; *-gould) os=-sysv ;; *-highlevel) os=-bsd ;; *-encore) os=-bsd ;; *-sgi) os=-irix ;; *-siemens) os=-sysv4 ;; *-masscomp) os=-rtu ;; f30[01]-fujitsu | f700-fujitsu) os=-uxpv ;; *-rom68k) os=-coff ;; *-*bug) os=-coff ;; *-apple) os=-macos ;; *-atari*) os=-mint ;; *) os=-none ;; esac fi # Here we handle the case where we know the os, and the CPU type, but not the # manufacturer. We pick the logical manufacturer. 
vendor=unknown case $basic_machine in *-unknown) case $os in -riscix*) vendor=acorn ;; -sunos*) vendor=sun ;; -cnk*|-aix*) vendor=ibm ;; -beos*) vendor=be ;; -hpux*) vendor=hp ;; -mpeix*) vendor=hp ;; -hiux*) vendor=hitachi ;; -unos*) vendor=crds ;; -dgux*) vendor=dg ;; -luna*) vendor=omron ;; -genix*) vendor=ns ;; -mvs* | -opened*) vendor=ibm ;; -os400*) vendor=ibm ;; -ptx*) vendor=sequent ;; -tpf*) vendor=ibm ;; -vxsim* | -vxworks* | -windiss*) vendor=wrs ;; -aux*) vendor=apple ;; -hms*) vendor=hitachi ;; -mpw* | -macos*) vendor=apple ;; -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) vendor=atari ;; -vos*) vendor=stratus ;; esac basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` ;; esac echo $basic_machine$os exit # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: libstoragemgmt-1.2.3/build-aux/ltmain.sh0000644000175000017500000105152212540163517015202 00000000000000 # libtool (GNU libtool) 2.4.2 # Written by Gordon Matzigkeit , 1996 # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, # 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. # This is free software; see the source for copying conditions. There is NO # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # GNU Libtool is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. 
# # GNU Libtool is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Libtool; see the file COPYING. If not, a copy # can be downloaded from http://www.gnu.org/licenses/gpl.html, # or obtained by writing to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # Usage: $progname [OPTION]... [MODE-ARG]... # # Provide generalized library-building support services. # # --config show all configuration variables # --debug enable verbose shell tracing # -n, --dry-run display commands without modifying any files # --features display basic configuration information and exit # --mode=MODE use operation mode MODE # --preserve-dup-deps don't remove duplicate dependency libraries # --quiet, --silent don't print informational messages # --no-quiet, --no-silent # print informational messages (default) # --no-warn don't display warning messages # --tag=TAG use configuration variables from tag TAG # -v, --verbose print more informational messages than default # --no-verbose don't print the extra informational messages # --version print version information # -h, --help, --help-all print short, long, or detailed help message # # MODE must be one of the following: # # clean remove files from the build directory # compile compile a source file into a libtool object # execute automatically set library path, then run a program # finish complete the installation of libtool libraries # install install libraries or executables # link create a library or an executable # uninstall remove libraries from an installed directory # # MODE-ARGS vary depending on the MODE. When passed as first option, # `--mode=MODE' may be abbreviated as `MODE' or a unique abbreviation of that. 
# Try `$progname --help --mode=MODE' for a more detailed description of MODE. # # When reporting a bug, please describe a test case to reproduce it and # include the following information: # # host-triplet: $host # shell: $SHELL # compiler: $LTCC # compiler flags: $LTCFLAGS # linker: $LD (gnu? $with_gnu_ld) # $progname: (GNU libtool) 2.4.2 # automake: $automake_version # autoconf: $autoconf_version # # Report bugs to . # GNU libtool home page: . # General help using GNU software: . PROGRAM=libtool PACKAGE=libtool VERSION=2.4.2 TIMESTAMP="" package_revision=1.3337 # Be Bourne compatible if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac fi BIN_SH=xpg4; export BIN_SH # for Tru64 DUALCASE=1; export DUALCASE # for MKS sh # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF $1 _LTECHO_EOF' } # NLS nuisances: We save the old values to restore during execute mode. lt_user_locale= lt_safe_locale= for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES do eval "if test \"\${$lt_var+set}\" = set; then save_$lt_var=\$$lt_var $lt_var=C export $lt_var lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\" lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\" fi" done LC_ALL=C LANGUAGE=C export LANGUAGE LC_ALL $lt_unset CDPATH # Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh # is ksh but when the shell is invoked as "sh" and the current value of # the _XPG environment variable is not equal to 1 (one), the special # positional parameter $0, within a function call, is the name of the # function. 
progpath="$0" : ${CP="cp -f"} test "${ECHO+set}" = set || ECHO=${as_echo-'printf %s\n'} : ${MAKE="make"} : ${MKDIR="mkdir"} : ${MV="mv -f"} : ${RM="rm -f"} : ${SHELL="${CONFIG_SHELL-/bin/sh}"} : ${Xsed="$SED -e 1s/^X//"} # Global variables: EXIT_SUCCESS=0 EXIT_FAILURE=1 EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. exit_status=$EXIT_SUCCESS # Make sure IFS has a sensible default lt_nl=' ' IFS=" $lt_nl" dirname="s,/[^/]*$,," basename="s,^.*/,," # func_dirname file append nondir_replacement # Compute the dirname of FILE. If nonempty, add APPEND to the result, # otherwise set result to NONDIR_REPLACEMENT. func_dirname () { func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` if test "X$func_dirname_result" = "X${1}"; then func_dirname_result="${3}" else func_dirname_result="$func_dirname_result${2}" fi } # func_dirname may be replaced by extended shell implementation # func_basename file func_basename () { func_basename_result=`$ECHO "${1}" | $SED "$basename"` } # func_basename may be replaced by extended shell implementation # func_dirname_and_basename file append nondir_replacement # perform func_basename and func_dirname in a single function # call: # dirname: Compute the dirname of FILE. If nonempty, # add APPEND to the result, otherwise set result # to NONDIR_REPLACEMENT. # value returned in "$func_dirname_result" # basename: Compute filename of FILE. # value retuned in "$func_basename_result" # Implementation must be kept synchronized with func_dirname # and func_basename. For efficiency, we do not delegate to # those functions but instead duplicate the functionality here. func_dirname_and_basename () { # Extract subdirectory from the argument. 
func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"` if test "X$func_dirname_result" = "X${1}"; then func_dirname_result="${3}" else func_dirname_result="$func_dirname_result${2}" fi func_basename_result=`$ECHO "${1}" | $SED -e "$basename"` } # func_dirname_and_basename may be replaced by extended shell implementation # func_stripname prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). # func_strip_suffix prefix name func_stripname () { case ${2} in .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; esac } # func_stripname may be replaced by extended shell implementation # These SED scripts presuppose an absolute path with a trailing slash. pathcar='s,^/\([^/]*\).*$,\1,' pathcdr='s,^/[^/]*,,' removedotparts=':dotsl s@/\./@/@g t dotsl s,/\.$,/,' collapseslashes='s@/\{1,\}@/@g' finalslash='s,/*$,/,' # func_normal_abspath PATH # Remove doubled-up and trailing slashes, "." path components, # and cancel out any ".." path components in PATH after making # it an absolute path. # value returned in "$func_normal_abspath_result" func_normal_abspath () { # Start from root dir and reassemble the path. func_normal_abspath_result= func_normal_abspath_tpath=$1 func_normal_abspath_altnamespace= case $func_normal_abspath_tpath in "") # Empty path, that just means $cwd. func_stripname '' '/' "`pwd`" func_normal_abspath_result=$func_stripname_result return ;; # The next three entries are used to spot a run of precisely # two leading slashes without using negated character classes; # we take advantage of case's first-match behaviour. ///*) # Unusual form of absolute path, do nothing. 
;; //*) # Not necessarily an ordinary path; POSIX reserves leading '//' # and for example Cygwin uses it to access remote file shares # over CIFS/SMB, so we conserve a leading double slash if found. func_normal_abspath_altnamespace=/ ;; /*) # Absolute path, do nothing. ;; *) # Relative path, prepend $cwd. func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath ;; esac # Cancel out all the simple stuff to save iterations. We also want # the path to end with a slash for ease of parsing, so make sure # there is one (and only one) here. func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ -e "$removedotparts" -e "$collapseslashes" -e "$finalslash"` while :; do # Processed it all yet? if test "$func_normal_abspath_tpath" = / ; then # If we ascended to the root using ".." the result may be empty now. if test -z "$func_normal_abspath_result" ; then func_normal_abspath_result=/ fi break fi func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ -e "$pathcar"` func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ -e "$pathcdr"` # Figure out what to do with it case $func_normal_abspath_tcomponent in "") # Trailing empty path component, ignore it. ;; ..) # Parent dir; strip last assembled component from result. func_dirname "$func_normal_abspath_result" func_normal_abspath_result=$func_dirname_result ;; *) # Actual path component, append it. func_normal_abspath_result=$func_normal_abspath_result/$func_normal_abspath_tcomponent ;; esac done # Restore leading double-slash if one was found on entry. func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result } # func_relative_path SRCDIR DSTDIR # generates a relative path from SRCDIR to DSTDIR, with a trailing # slash if non-empty, suitable for immediately appending a filename # without needing to append a separator. 
# value returned in "$func_relative_path_result" func_relative_path () { func_relative_path_result= func_normal_abspath "$1" func_relative_path_tlibdir=$func_normal_abspath_result func_normal_abspath "$2" func_relative_path_tbindir=$func_normal_abspath_result # Ascend the tree starting from libdir while :; do # check if we have found a prefix of bindir case $func_relative_path_tbindir in $func_relative_path_tlibdir) # found an exact match func_relative_path_tcancelled= break ;; $func_relative_path_tlibdir*) # found a matching prefix func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" func_relative_path_tcancelled=$func_stripname_result if test -z "$func_relative_path_result"; then func_relative_path_result=. fi break ;; *) func_dirname $func_relative_path_tlibdir func_relative_path_tlibdir=${func_dirname_result} if test "x$func_relative_path_tlibdir" = x ; then # Have to descend all the way to the root! func_relative_path_result=../$func_relative_path_result func_relative_path_tcancelled=$func_relative_path_tbindir break fi func_relative_path_result=../$func_relative_path_result ;; esac done # Now calculate path; take care to avoid doubling-up slashes. func_stripname '' '/' "$func_relative_path_result" func_relative_path_result=$func_stripname_result func_stripname '/' '/' "$func_relative_path_tcancelled" if test "x$func_stripname_result" != x ; then func_relative_path_result=${func_relative_path_result}/${func_stripname_result} fi # Normalisation. If bindir is libdir, return empty string, # else relative path ending with a slash; either way, target # file name can be directly appended. if test ! 
-z "$func_relative_path_result"; then func_stripname './' '' "$func_relative_path_result/" func_relative_path_result=$func_stripname_result fi } # The name of this program: func_dirname_and_basename "$progpath" progname=$func_basename_result # Make sure we have an absolute path for reexecution: case $progpath in [\\/]*|[A-Za-z]:\\*) ;; *[\\/]*) progdir=$func_dirname_result progdir=`cd "$progdir" && pwd` progpath="$progdir/$progname" ;; *) save_IFS="$IFS" IFS=${PATH_SEPARATOR-:} for progdir in $PATH; do IFS="$save_IFS" test -x "$progdir/$progname" && break done IFS="$save_IFS" test -n "$progdir" || progdir=`pwd` progpath="$progdir/$progname" ;; esac # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. Xsed="${SED}"' -e 1s/^X//' sed_quote_subst='s/\([`"$\\]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\(["`\\]\)/\\\1/g' # Sed substitution that turns a string into a regex matching for the # string literally. sed_make_literal_regex='s,[].[^$\\*\/],\\&,g' # Sed substitution that converts a w32 file name or path # which contains forward slashes, into one that contains # (escaped) backslashes. A very naive implementation. lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' # Re-`\' parameter expansions in output of double_quote_subst that were # `\'-ed in input to the same. If an odd number of `\' preceded a '$' # in input to double_quote_subst, that '$' was protected from expansion. # Since each input `\' is now two `\'s, look for any number of runs of # four `\'s followed by two `\'s and then a '$'. `\' that '$'. bs='\\' bs2='\\\\' bs4='\\\\\\\\' dollar='\$' sed_double_backslash="\ s/$bs4/&\\ /g s/^$bs2$dollar/$bs&/ s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g s/\n//g" # Standard options: opt_dry_run=false opt_help=false opt_quiet=false opt_verbose=false opt_warning=: # func_echo arg... 
# Echo program name prefixed message, along with the current mode # name if it has been set yet. func_echo () { $ECHO "$progname: ${opt_mode+$opt_mode: }$*" } # func_verbose arg... # Echo program name prefixed message in verbose mode only. func_verbose () { $opt_verbose && func_echo ${1+"$@"} # A bug in bash halts the script if the last line of a function # fails when set -e is in force, so we need another command to # work around that: : } # func_echo_all arg... # Invoke $ECHO with all args, space-separated. func_echo_all () { $ECHO "$*" } # func_error arg... # Echo program name prefixed message to standard error. func_error () { $ECHO "$progname: ${opt_mode+$opt_mode: }"${1+"$@"} 1>&2 } # func_warning arg... # Echo program name prefixed warning message to standard error. func_warning () { $opt_warning && $ECHO "$progname: ${opt_mode+$opt_mode: }warning: "${1+"$@"} 1>&2 # bash bug again: : } # func_fatal_error arg... # Echo program name prefixed message to standard error, and exit. func_fatal_error () { func_error ${1+"$@"} exit $EXIT_FAILURE } # func_fatal_help arg... # Echo program name prefixed message to standard error, followed by # a help hint, and exit. func_fatal_help () { func_error ${1+"$@"} func_fatal_error "$help" } help="Try \`$progname --help' for more information." ## default # func_grep expression filename # Check whether EXPRESSION matches any line of FILENAME, without output. func_grep () { $GREP "$1" "$2" >/dev/null 2>&1 } # func_mkdir_p directory-path # Make sure the entire path to DIRECTORY-PATH is available. func_mkdir_p () { my_directory_path="$1" my_dir_list= if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then # Protect directory names starting with `-' case $my_directory_path in -*) my_directory_path="./$my_directory_path" ;; esac # While some portion of DIR does not yet exist... while test ! -d "$my_directory_path"; do # ...make a list in topmost first order. 
Use a colon delimited # list incase some portion of path contains whitespace. my_dir_list="$my_directory_path:$my_dir_list" # If the last portion added has no slash in it, the list is done case $my_directory_path in */*) ;; *) break ;; esac # ...otherwise throw away the child directory and loop my_directory_path=`$ECHO "$my_directory_path" | $SED -e "$dirname"` done my_dir_list=`$ECHO "$my_dir_list" | $SED 's,:*$,,'` save_mkdir_p_IFS="$IFS"; IFS=':' for my_dir in $my_dir_list; do IFS="$save_mkdir_p_IFS" # mkdir can fail with a `File exist' error if two processes # try to create one of the directories concurrently. Don't # stop in that case! $MKDIR "$my_dir" 2>/dev/null || : done IFS="$save_mkdir_p_IFS" # Bail out if we (or some other process) failed to create a directory. test -d "$my_directory_path" || \ func_fatal_error "Failed to create \`$1'" fi } # func_mktempdir [string] # Make a temporary directory that won't clash with other running # libtool processes, and avoids race conditions if possible. If # given, STRING is the basename for that directory. func_mktempdir () { my_template="${TMPDIR-/tmp}/${1-$progname}" if test "$opt_dry_run" = ":"; then # Return a directory name, but don't create it in dry-run mode my_tmpdir="${my_template}-$$" else # If mktemp works, use that first and foremost my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null` if test ! -d "$my_tmpdir"; then # Failing that, at least try and use $RANDOM to avoid a race my_tmpdir="${my_template}-${RANDOM-0}$$" save_mktempdir_umask=`umask` umask 0077 $MKDIR "$my_tmpdir" umask $save_mktempdir_umask fi # If we're not in dry-run mode, bomb out on failure test -d "$my_tmpdir" || \ func_fatal_error "cannot create temporary directory \`$my_tmpdir'" fi $ECHO "$my_tmpdir" } # func_quote_for_eval arg # Aesthetically quote ARG to be evaled later. 
# This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT # is double-quoted, suitable for a subsequent eval, whereas # FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters # which are still active within double quotes backslashified. func_quote_for_eval () { case $1 in *[\\\`\"\$]*) func_quote_for_eval_unquoted_result=`$ECHO "$1" | $SED "$sed_quote_subst"` ;; *) func_quote_for_eval_unquoted_result="$1" ;; esac case $func_quote_for_eval_unquoted_result in # Double-quote args containing shell metacharacters to delay # word splitting, command substitution and and variable # expansion for a subsequent eval. # Many Bourne shells cannot handle close brackets correctly # in scan sets, so we specify it separately. *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\"" ;; *) func_quote_for_eval_result="$func_quote_for_eval_unquoted_result" esac } # func_quote_for_expand arg # Aesthetically quote ARG to be evaled later; same as above, # but do not quote variable references. func_quote_for_expand () { case $1 in *[\\\`\"]*) my_arg=`$ECHO "$1" | $SED \ -e "$double_quote_subst" -e "$sed_double_backslash"` ;; *) my_arg="$1" ;; esac case $my_arg in # Double-quote args containing shell metacharacters to delay # word splitting and command substitution for a subsequent eval. # Many Bourne shells cannot handle close brackets correctly # in scan sets, so we specify it separately. *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") my_arg="\"$my_arg\"" ;; esac func_quote_for_expand_result="$my_arg" } # func_show_eval cmd [fail_exp] # Unless opt_silent is true, then output CMD. Then, if opt_dryrun is # not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP # is given, then evaluate it. 
func_show_eval () { my_cmd="$1" my_fail_exp="${2-:}" ${opt_silent-false} || { func_quote_for_expand "$my_cmd" eval "func_echo $func_quote_for_expand_result" } if ${opt_dry_run-false}; then :; else eval "$my_cmd" my_status=$? if test "$my_status" -eq 0; then :; else eval "(exit $my_status); $my_fail_exp" fi fi } # func_show_eval_locale cmd [fail_exp] # Unless opt_silent is true, then output CMD. Then, if opt_dryrun is # not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP # is given, then evaluate it. Use the saved locale for evaluation. func_show_eval_locale () { my_cmd="$1" my_fail_exp="${2-:}" ${opt_silent-false} || { func_quote_for_expand "$my_cmd" eval "func_echo $func_quote_for_expand_result" } if ${opt_dry_run-false}; then :; else eval "$lt_user_locale $my_cmd" my_status=$? eval "$lt_safe_locale" if test "$my_status" -eq 0; then :; else eval "(exit $my_status); $my_fail_exp" fi fi } # func_tr_sh # Turn $1 into a string suitable for a shell variable name. # Result is stored in $func_tr_sh_result. All characters # not in the set a-zA-Z0-9_ are replaced with '_'. Further, # if $1 begins with a digit, a '_' is prepended as well. func_tr_sh () { case $1 in [0-9]* | *[!a-zA-Z0-9_]*) func_tr_sh_result=`$ECHO "$1" | $SED 's/^\([0-9]\)/_\1/; s/[^a-zA-Z0-9_]/_/g'` ;; * ) func_tr_sh_result=$1 ;; esac } # func_version # Echo version message to standard output and exit. func_version () { $opt_debug $SED -n '/(C)/!b go :more /\./!{ N s/\n# / / b more } :go /^# '$PROGRAM' (GNU /,/# warranty; / { s/^# // s/^# *$// s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/ p }' < "$progpath" exit $? } # func_usage # Echo short help message to standard output and exit. func_usage () { $opt_debug $SED -n '/^# Usage:/,/^# *.*--help/ { s/^# // s/^# *$// s/\$progname/'$progname'/ p }' < "$progpath" echo $ECHO "run \`$progname --help | more' for full usage" exit $? } # func_help [NOEXIT] # Echo long help message to standard output and exit, # unless 'noexit' is passed as argument. 
func_help () { $opt_debug $SED -n '/^# Usage:/,/# Report bugs to/ { :print s/^# // s/^# *$// s*\$progname*'$progname'* s*\$host*'"$host"'* s*\$SHELL*'"$SHELL"'* s*\$LTCC*'"$LTCC"'* s*\$LTCFLAGS*'"$LTCFLAGS"'* s*\$LD*'"$LD"'* s/\$with_gnu_ld/'"$with_gnu_ld"'/ s/\$automake_version/'"`(${AUTOMAKE-automake} --version) 2>/dev/null |$SED 1q`"'/ s/\$autoconf_version/'"`(${AUTOCONF-autoconf} --version) 2>/dev/null |$SED 1q`"'/ p d } /^# .* home page:/b print /^# General help using/b print ' < "$progpath" ret=$? if test -z "$1"; then exit $ret fi } # func_missing_arg argname # Echo program name prefixed message to standard error and set global # exit_cmd. func_missing_arg () { $opt_debug func_error "missing argument for $1." exit_cmd=exit } # func_split_short_opt shortopt # Set func_split_short_opt_name and func_split_short_opt_arg shell # variables after splitting SHORTOPT after the 2nd character. func_split_short_opt () { my_sed_short_opt='1s/^\(..\).*$/\1/;q' my_sed_short_rest='1s/^..\(.*\)$/\1/;q' func_split_short_opt_name=`$ECHO "$1" | $SED "$my_sed_short_opt"` func_split_short_opt_arg=`$ECHO "$1" | $SED "$my_sed_short_rest"` } # func_split_short_opt may be replaced by extended shell implementation # func_split_long_opt longopt # Set func_split_long_opt_name and func_split_long_opt_arg shell # variables after splitting LONGOPT at the `=' sign. func_split_long_opt () { my_sed_long_opt='1s/^\(--[^=]*\)=.*/\1/;q' my_sed_long_arg='1s/^--[^=]*=//' func_split_long_opt_name=`$ECHO "$1" | $SED "$my_sed_long_opt"` func_split_long_opt_arg=`$ECHO "$1" | $SED "$my_sed_long_arg"` } # func_split_long_opt may be replaced by extended shell implementation exit_cmd=: magic="%%%MAGIC variable%%%" magic_exe="%%%MAGIC EXE variable%%%" # Global variables. nonopt= preserve_args= lo2o="s/\\.lo\$/.${objext}/" o2lo="s/\\.${objext}\$/.lo/" extracted_archives= extracted_serial=0 # If this variable is set in any of the actions, the command in it # will be execed at the end. 
This prevents here-documents from being # left over by shells. exec_cmd= # func_append var value # Append VALUE to the end of shell variable VAR. func_append () { eval "${1}=\$${1}\${2}" } # func_append may be replaced by extended shell implementation # func_append_quoted var value # Quote VALUE and append to the end of shell variable VAR, separated # by a space. func_append_quoted () { func_quote_for_eval "${2}" eval "${1}=\$${1}\\ \$func_quote_for_eval_result" } # func_append_quoted may be replaced by extended shell implementation # func_arith arithmetic-term... func_arith () { func_arith_result=`expr "${@}"` } # func_arith may be replaced by extended shell implementation # func_len string # STRING may not start with a hyphen. func_len () { func_len_result=`expr "${1}" : ".*" 2>/dev/null || echo $max_cmd_len` } # func_len may be replaced by extended shell implementation # func_lo2o object func_lo2o () { func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` } # func_lo2o may be replaced by extended shell implementation # func_xform libobj-or-source func_xform () { func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` } # func_xform may be replaced by extended shell implementation # func_fatal_configuration arg... # Echo program name prefixed message to standard error, followed by # a configuration failure hint, and exit. func_fatal_configuration () { func_error ${1+"$@"} func_error "See the $PACKAGE documentation for more information." func_fatal_error "Fatal configuration error." } # func_config # Display the configuration for all the tags in this script. func_config () { re_begincf='^# ### BEGIN LIBTOOL' re_endcf='^# ### END LIBTOOL' # Default configuration. $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" # Now print the configurations for the tags. for tagname in $taglist; do $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" done exit $? 
} # func_features # Display the features supported by this script. func_features () { echo "host: $host" if test "$build_libtool_libs" = yes; then echo "enable shared libraries" else echo "disable shared libraries" fi if test "$build_old_libs" = yes; then echo "enable static libraries" else echo "disable static libraries" fi exit $? } # func_enable_tag tagname # Verify that TAGNAME is valid, and either flag an error and exit, or # enable the TAGNAME tag. We also add TAGNAME to the global $taglist # variable here. func_enable_tag () { # Global variable: tagname="$1" re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" sed_extractcf="/$re_begincf/,/$re_endcf/p" # Validate tagname. case $tagname in *[!-_A-Za-z0-9,/]*) func_fatal_error "invalid tag name: $tagname" ;; esac # Don't test for the "default" C tag, as we know it's # there but not specially marked. case $tagname in CC) ;; *) if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then taglist="$taglist $tagname" # Evaluate the configuration. Be careful to quote the path # and the sed script, to avoid splitting on whitespace, but # also don't use non-portable quotes within backquotes within # quotes we have to do it in 2 steps: extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` eval "$extractedcf" else func_error "ignoring unknown tag $tagname" fi ;; esac } # func_check_version_match # Ensure that we are using m4 macros, and libtool script from the same # release of libtool. func_check_version_match () { if test "$package_revision" != "$macro_revision"; then if test "$VERSION" != "$macro_version"; then if test -z "$macro_version"; then cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, but the $progname: definition of this LT_INIT comes from an older release. $progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION $progname: and run autoconf again. 
_LT_EOF else cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, but the $progname: definition of this LT_INIT comes from $PACKAGE $macro_version. $progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION $progname: and run autoconf again. _LT_EOF fi else cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, $progname: but the definition of this LT_INIT comes from revision $macro_revision. $progname: You should recreate aclocal.m4 with macros from revision $package_revision $progname: of $PACKAGE $VERSION and run autoconf again. _LT_EOF fi exit $EXIT_MISMATCH fi } # Shorthand for --mode=foo, only valid as the first argument case $1 in clean|clea|cle|cl) shift; set dummy --mode clean ${1+"$@"}; shift ;; compile|compil|compi|comp|com|co|c) shift; set dummy --mode compile ${1+"$@"}; shift ;; execute|execut|execu|exec|exe|ex|e) shift; set dummy --mode execute ${1+"$@"}; shift ;; finish|finis|fini|fin|fi|f) shift; set dummy --mode finish ${1+"$@"}; shift ;; install|instal|insta|inst|ins|in|i) shift; set dummy --mode install ${1+"$@"}; shift ;; link|lin|li|l) shift; set dummy --mode link ${1+"$@"}; shift ;; uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) shift; set dummy --mode uninstall ${1+"$@"}; shift ;; esac # Option defaults: opt_debug=: opt_dry_run=false opt_config=false opt_preserve_dup_deps=false opt_features=false opt_finish=false opt_help=false opt_help_all=false opt_silent=: opt_warning=: opt_verbose=: opt_silent=false opt_verbose=false # Parse options once, thoroughly. This comes as soon as possible in the # script to make things like `--version' happen as quickly as we can. 
{ # this just eases exit handling while test $# -gt 0; do opt="$1" shift case $opt in --debug|-x) opt_debug='set -x' func_echo "enabling shell trace mode" $opt_debug ;; --dry-run|--dryrun|-n) opt_dry_run=: ;; --config) opt_config=: func_config ;; --dlopen|-dlopen) optarg="$1" opt_dlopen="${opt_dlopen+$opt_dlopen }$optarg" shift ;; --preserve-dup-deps) opt_preserve_dup_deps=: ;; --features) opt_features=: func_features ;; --finish) opt_finish=: set dummy --mode finish ${1+"$@"}; shift ;; --help) opt_help=: ;; --help-all) opt_help_all=: opt_help=': help-all' ;; --mode) test $# = 0 && func_missing_arg $opt && break optarg="$1" opt_mode="$optarg" case $optarg in # Valid mode arguments: clean|compile|execute|finish|install|link|relink|uninstall) ;; # Catch anything else as an error *) func_error "invalid argument for $opt" exit_cmd=exit break ;; esac shift ;; --no-silent|--no-quiet) opt_silent=false func_append preserve_args " $opt" ;; --no-warning|--no-warn) opt_warning=false func_append preserve_args " $opt" ;; --no-verbose) opt_verbose=false func_append preserve_args " $opt" ;; --silent|--quiet) opt_silent=: func_append preserve_args " $opt" opt_verbose=false ;; --verbose|-v) opt_verbose=: func_append preserve_args " $opt" opt_silent=false ;; --tag) test $# = 0 && func_missing_arg $opt && break optarg="$1" opt_tag="$optarg" func_append preserve_args " $opt $optarg" func_enable_tag "$optarg" shift ;; -\?|-h) func_usage ;; --help) func_help ;; --version) func_version ;; # Separate optargs to long options: --*=*) func_split_long_opt "$opt" set dummy "$func_split_long_opt_name" "$func_split_long_opt_arg" ${1+"$@"} shift ;; # Separate non-argument short options: -\?*|-h*|-n*|-v*) func_split_short_opt "$opt" set dummy "$func_split_short_opt_name" "-$func_split_short_opt_arg" ${1+"$@"} shift ;; --) break ;; -*) func_fatal_help "unrecognized option \`$opt'" ;; *) set dummy "$opt" ${1+"$@"}; shift; break ;; esac done # Validate options: # save first non-option argument if 
test "$#" -gt 0; then nonopt="$opt" shift fi # preserve --debug test "$opt_debug" = : || func_append preserve_args " --debug" case $host in *cygwin* | *mingw* | *pw32* | *cegcc*) # don't eliminate duplications in $postdeps and $predeps opt_duplicate_compiler_generated_deps=: ;; *) opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps ;; esac $opt_help || { # Sanity checks first: func_check_version_match if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then func_fatal_configuration "not configured to build any kind of library" fi # Darwin sucks eval std_shrext=\"$shrext_cmds\" # Only execute mode is allowed to have -dlopen flags. if test -n "$opt_dlopen" && test "$opt_mode" != execute; then func_error "unrecognized option \`-dlopen'" $ECHO "$help" 1>&2 exit $EXIT_FAILURE fi # Change the help message to a mode-specific one. generic_help="$help" help="Try \`$progname --help --mode=$opt_mode' for more information." } # Bail if the options were screwed $exit_cmd $EXIT_FAILURE } ## ----------- ## ## Main. ## ## ----------- ## # func_lalib_p file # True iff FILE is a libtool `.la' library or `.lo' object file. # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_lalib_p () { test -f "$1" && $SED -e 4q "$1" 2>/dev/null \ | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 } # func_lalib_unsafe_p file # True iff FILE is a libtool `.la' library or `.lo' object file. # This function implements the same check as func_lalib_p without # resorting to external programs. To this end, it redirects stdin and # closes it afterwards, without saving the original file descriptor. # As a safety measure, use it only where a negative result would be # fatal anyway. Works if `file' does not exist. 
func_lalib_unsafe_p () { lalib_p=no if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then for lalib_p_l in 1 2 3 4 do read lalib_p_line case "$lalib_p_line" in \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; esac done exec 0<&5 5<&- fi test "$lalib_p" = yes } # func_ltwrapper_script_p file # True iff FILE is a libtool wrapper script # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_script_p () { func_lalib_p "$1" } # func_ltwrapper_executable_p file # True iff FILE is a libtool wrapper executable # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_executable_p () { func_ltwrapper_exec_suffix= case $1 in *.exe) ;; *) func_ltwrapper_exec_suffix=.exe ;; esac $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 } # func_ltwrapper_scriptname file # Assumes file is an ltwrapper_executable # uses $file to determine the appropriate filename for a # temporary ltwrapper_script. func_ltwrapper_scriptname () { func_dirname_and_basename "$1" "" "." func_stripname '' '.exe' "$func_basename_result" func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" } # func_ltwrapper_p file # True iff FILE is a libtool wrapper script or wrapper executable # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_p () { func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" } # func_execute_cmds commands fail_cmd # Execute tilde-delimited COMMANDS. # If FAIL_CMD is given, eval that upon failure. # FAIL_CMD may read-access the current command in variable CMD! func_execute_cmds () { $opt_debug save_ifs=$IFS; IFS='~' for cmd in $1; do IFS=$save_ifs eval cmd=\"$cmd\" func_show_eval "$cmd" "${2-:}" done IFS=$save_ifs } # func_source file # Source FILE, adding directory component if necessary. 
# Note that it is not necessary on cygwin/mingw to append a dot to
# FILE even if both FILE and FILE.exe exist: automatic-append-.exe
# behavior happens only for exec(3), not for open(2)!  Also, sourcing
# `FILE.' does not work on cygwin managed mounts.
func_source ()
{
    $opt_debug
    case $1 in
    */* | *\\*)	. "$1" ;;
    *)		. "./$1" ;;
    esac
}

# func_resolve_sysroot PATH
# Replace a leading = in PATH with a sysroot.  Store the result into
# func_resolve_sysroot_result
func_resolve_sysroot ()
{
  func_resolve_sysroot_result=$1
  case $func_resolve_sysroot_result in
  =*)
    func_stripname '=' '' "$func_resolve_sysroot_result"
    func_resolve_sysroot_result=$lt_sysroot$func_stripname_result
    ;;
  esac
}

# func_replace_sysroot PATH
# If PATH begins with the sysroot, replace it with = and
# store the result into func_replace_sysroot_result.
func_replace_sysroot ()
{
  case "$lt_sysroot:$1" in
  ?*:"$lt_sysroot"*)
    func_stripname "$lt_sysroot" '' "$1"
    func_replace_sysroot_result="=$func_stripname_result"
    ;;
  *)
    # Including no sysroot.
    func_replace_sysroot_result=$1
    ;;
  esac
}

# func_infer_tag arg
# Infer tagged configuration to use if any are available and
# if one wasn't chosen via the "--tag" command line option.
# Only attempt this if the compiler in the base compile
# command doesn't match the default compiler.
# arg is usually of the form 'gcc ...'
func_infer_tag ()
{
    $opt_debug
    if test -n "$available_tags" && test -z "$tagname"; then
      CC_quoted=
      for arg in $CC; do
	func_append_quoted CC_quoted "$arg"
      done
      CC_expanded=`func_echo_all $CC`
      CC_quoted_expanded=`func_echo_all $CC_quoted`
      case $@ in
      # Blanks in the command may have been stripped by the calling shell,
      # but not from the CC environment variable when configure was run.
      " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
      " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;;
      # Blanks at the start of $base_compile will cause this to fail
      # if we don't check for them as well.
      *)
	for z in $available_tags; do
	  if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then
	    # Evaluate the configuration.
	    eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`"
	    CC_quoted=
	    for arg in $CC; do
	      # Double-quote args containing other shell metacharacters.
	      func_append_quoted CC_quoted "$arg"
	    done
	    CC_expanded=`func_echo_all $CC`
	    CC_quoted_expanded=`func_echo_all $CC_quoted`
	    case "$@ " in
	    " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
	    " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*)
	      # The compiler in the base compile command matches
	      # the one in the tagged configuration.
	      # Assume this is the tagged configuration we want.
	      tagname=$z
	      break
	      ;;
	    esac
	  fi
	done
	# If $tagname still isn't set, then no tagged configuration
	# was found and let the user know that the "--tag" command
	# line option must be used.
	if test -z "$tagname"; then
	  func_echo "unable to infer tagged configuration"
	  func_fatal_error "specify a tag with \`--tag'"
#	else
#	  func_verbose "using $tagname tagged configuration"
	fi
	;;
      esac
    fi
}

# func_write_libtool_object output_name pic_name nonpic_name
# Create a libtool object file (analogous to a ".la" file),
# but don't create it if we're doing a dry run.
func_write_libtool_object ()
{
    write_libobj=${1}
    if test "$build_libtool_libs" = yes; then
      write_lobj=\'${2}\'
    else
      write_lobj=none
    fi

    if test "$build_old_libs" = yes; then
      write_oldobj=\'${3}\'
    else
      write_oldobj=none
    fi

    $opt_dry_run || {
      # NOTE(review): the here-document below was corrupted in the extracted
      # source; restored from the canonical libtool 2.4.x ltmain.sh.
      cat >${write_libobj}T <<EOF
# $write_libobj - a libtool object file
# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
#
# Please DO NOT delete this file!
# It is necessary for linking the library.

# Name of the PIC object.
pic_object=$write_lobj

# Name of the non-PIC object.
non_pic_object=$write_oldobj

EOF
      $MV "${write_libobj}T" "${write_libobj}"
    }
}


##################################################
# FILE NAME AND PATH CONVERSION HELPER FUNCTIONS #
##################################################

# func_convert_core_file_wine_to_w32 ARG
# Helper function used by file name conversion functions when $build is *nix,
# and $host is mingw, cygwin, or some other w32 environment.  Relies on a
# correctly configured wine environment available, with the winepath program
# in $build's $PATH.
#
# ARG is the $build file name to be converted to w32 format.
# Result is available in $func_convert_core_file_wine_to_w32_result, and will
# be empty on error (or when ARG is empty).
func_convert_core_file_wine_to_w32 ()
{
  $opt_debug
  # NOTE(review): the head of this function was corrupted in the extracted
  # source; the winepath invocation is restored from canonical ltmain.sh.
  func_convert_core_file_wine_to_w32_result="$1"
  if test -n "$1"; then
    # winepath does not exit with a non-zero error code when the conversion
    # fails, so check both the exit code and for non-empty output.
    func_convert_core_file_wine_to_w32_tmp=`winepath -w "$1" 2>/dev/null`
    if test "$?" -eq 0 && test -n "${func_convert_core_file_wine_to_w32_tmp}"; then
      func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" |
        $SED -e "$lt_sed_naive_backslashify"`
    else
      func_convert_core_file_wine_to_w32_result=
    fi
  fi
}
# end: func_convert_core_file_wine_to_w32


# func_convert_core_path_wine_to_w32 ARG
# Helper function used by path conversion functions when $build is *nix, and
# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly
# configured wine environment available, with the winepath program in $build's
# $PATH. Assumes ARG has no leading or trailing path separator characters.
#
# ARG is path to be converted from $build format to win32.
# Result is available in $func_convert_core_path_wine_to_w32_result.
# Unconvertible file (directory) names in ARG are skipped; if no directory names
# are convertible, then the result may be empty.
func_convert_core_path_wine_to_w32 ()
{
  $opt_debug
  # unfortunately, winepath doesn't convert paths, only file names
  func_convert_core_path_wine_to_w32_result=""
  if test -n "$1"; then
    oldIFS=$IFS
    IFS=:
    for func_convert_core_path_wine_to_w32_f in $1; do
      IFS=$oldIFS
      func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f"
      if test -n "$func_convert_core_file_wine_to_w32_result" ; then
	if test -z "$func_convert_core_path_wine_to_w32_result"; then
	  func_convert_core_path_wine_to_w32_result="$func_convert_core_file_wine_to_w32_result"
	else
	  func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result"
	fi
      fi
    done
    IFS=$oldIFS
  fi
}
# end: func_convert_core_path_wine_to_w32


# func_cygpath ARGS...
# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when
# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2)
# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or
# (2), returns the Cygwin file name or path in func_cygpath_result (input
# file name or path is assumed to be in w32 format, as previously converted
# from $build's *nix or MSYS format). In case (3), returns the w32 file name
# or path in func_cygpath_result (input file name or path is assumed to be in
# Cygwin format). Returns an empty string on error.
#
# ARGS are passed to cygpath, with the last one being the file name or path to
# be converted.
#
# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH
# environment variable; do not put it in $PATH.
func_cygpath ()
{
  $opt_debug
  if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then
    func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null`
    if test "$?" -ne 0; then
      # on failure, ensure result is empty
      func_cygpath_result=
    fi
  else
    func_cygpath_result=
    func_error "LT_CYGPATH is empty or specifies non-existent file: \`$LT_CYGPATH'"
  fi
}
#end: func_cygpath


# func_convert_core_msys_to_w32 ARG
# Convert file name or path ARG from MSYS format to w32 format.  Return
# result in func_convert_core_msys_to_w32_result.
func_convert_core_msys_to_w32 ()
{
  $opt_debug
  # awkward: cmd appends spaces to result
  func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null |
    $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"`
}
#end: func_convert_core_msys_to_w32


# func_convert_file_check ARG1 ARG2
# Verify that ARG1 (a file name in $build format) was converted to $host
# format in ARG2. Otherwise, emit an error message, but continue (resetting
# func_to_host_file_result to ARG1).
func_convert_file_check ()
{
  $opt_debug
  if test -z "$2" && test -n "$1" ; then
    func_error "Could not determine host file name corresponding to"
    func_error "  \`$1'"
    func_error "Continuing, but uninstalled executables may not work."
    # Fallback:
    func_to_host_file_result="$1"
  fi
}
# end func_convert_file_check


# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH
# Verify that FROM_PATH (a path in $build format) was converted to $host
# format in TO_PATH. Otherwise, emit an error message, but continue, resetting
# func_to_host_file_result to a simplistic fallback value (see below).
func_convert_path_check ()
{
  $opt_debug
  if test -z "$4" && test -n "$3"; then
    func_error "Could not determine the host path corresponding to"
    func_error "  \`$3'"
    func_error "Continuing, but uninstalled executables may not work."
    # Fallback.  This is a deliberately simplistic "conversion" and
    # should not be "improved".  See libtool.info.
    if test "x$1" != "x$2"; then
      lt_replace_pathsep_chars="s|$1|$2|g"
      func_to_host_path_result=`echo "$3" |
        $SED -e "$lt_replace_pathsep_chars"`
    else
      func_to_host_path_result="$3"
    fi
  fi
}
# end func_convert_path_check


# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG
# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT
# and appending REPL if ORIG matches BACKPAT.
func_convert_path_front_back_pathsep ()
{
  $opt_debug
  case $4 in
  $1 ) func_to_host_path_result="$3$func_to_host_path_result"
    ;;
  esac
  case $4 in
  $2 ) func_append func_to_host_path_result "$3"
    ;;
  esac
}
# end func_convert_path_front_back_pathsep


##################################################
# $build to $host FILE NAME CONVERSION FUNCTIONS #
##################################################
# invoked via `$to_host_file_cmd ARG'
#
# In each case, ARG is the path to be converted from $build to $host format.
# Result will be available in $func_to_host_file_result.


# func_to_host_file ARG
# Converts the file name ARG from $build format to $host format. Return result
# in func_to_host_file_result.
func_to_host_file ()
{
  $opt_debug
  $to_host_file_cmd "$1"
}
# end func_to_host_file


# func_to_tool_file ARG LAZY
# converts the file name ARG from $build format to toolchain format. Return
# result in func_to_tool_file_result.  If the conversion in use is listed
# in (the comma separated) LAZY, no conversion takes place.
func_to_tool_file ()
{
  $opt_debug
  case ,$2, in
  *,"$to_tool_file_cmd",*)
    func_to_tool_file_result=$1
    ;;
  *)
    $to_tool_file_cmd "$1"
    func_to_tool_file_result=$func_to_host_file_result
    ;;
  esac
}
# end func_to_tool_file


# func_convert_file_noop ARG
# Copy ARG to func_to_host_file_result.
func_convert_file_noop ()
{
  func_to_host_file_result="$1"
}
# end func_convert_file_noop


# func_convert_file_msys_to_w32 ARG
# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic
# conversion to w32 is not available inside the cwrapper.  Returns result in
# func_to_host_file_result.
func_convert_file_msys_to_w32 ()
{
  $opt_debug
  func_to_host_file_result="$1"
  if test -n "$1"; then
    func_convert_core_msys_to_w32 "$1"
    func_to_host_file_result="$func_convert_core_msys_to_w32_result"
  fi
  func_convert_file_check "$1" "$func_to_host_file_result"
}
# end func_convert_file_msys_to_w32


# func_convert_file_cygwin_to_w32 ARG
# Convert file name ARG from Cygwin to w32 format.  Returns result in
# func_to_host_file_result.
func_convert_file_cygwin_to_w32 ()
{
  $opt_debug
  func_to_host_file_result="$1"
  if test -n "$1"; then
    # because $build is cygwin, we call "the" cygpath in $PATH; no need to use
    # LT_CYGPATH in this case.
    func_to_host_file_result=`cygpath -m "$1"`
  fi
  func_convert_file_check "$1" "$func_to_host_file_result"
}
# end func_convert_file_cygwin_to_w32


# func_convert_file_nix_to_w32 ARG
# Convert file name ARG from *nix to w32 format.  Requires a wine environment
# and a working winepath.  Returns result in func_to_host_file_result.
# (documented above) Convert file name ARG from *nix to w32 via winepath;
# falls back to ARG on conversion failure (see func_convert_file_check).
func_convert_file_nix_to_w32 ()
{
  $opt_debug
  func_to_host_file_result="$1"
  if test -n "$1"; then
    func_convert_core_file_wine_to_w32 "$1"
    func_to_host_file_result="$func_convert_core_file_wine_to_w32_result"
  fi
  func_convert_file_check "$1" "$func_to_host_file_result"
}
# end func_convert_file_nix_to_w32


# func_convert_file_msys_to_cygwin ARG
# Convert file name ARG from MSYS to Cygwin format.  Requires LT_CYGPATH set.
# Returns result in func_to_host_file_result.
func_convert_file_msys_to_cygwin ()
{
  $opt_debug
  func_to_host_file_result="$1"
  if test -n "$1"; then
    # MSYS -> w32, then cygpath -u back to a Cygwin name.
    func_convert_core_msys_to_w32 "$1"
    func_cygpath -u "$func_convert_core_msys_to_w32_result"
    func_to_host_file_result="$func_cygpath_result"
  fi
  func_convert_file_check "$1" "$func_to_host_file_result"
}
# end func_convert_file_msys_to_cygwin


# func_convert_file_nix_to_cygwin ARG
# Convert file name ARG from *nix to Cygwin format.  Requires Cygwin installed
# in a wine environment, working winepath, and LT_CYGPATH set.  Returns result
# in func_to_host_file_result.
func_convert_file_nix_to_cygwin ()
{
  $opt_debug
  func_to_host_file_result="$1"
  if test -n "$1"; then
    # convert from *nix to w32, then use cygpath to convert from w32 to cygwin.
    func_convert_core_file_wine_to_w32 "$1"
    func_cygpath -u "$func_convert_core_file_wine_to_w32_result"
    func_to_host_file_result="$func_cygpath_result"
  fi
  func_convert_file_check "$1" "$func_to_host_file_result"
}
# end func_convert_file_nix_to_cygwin


#############################################
# $build to $host PATH CONVERSION FUNCTIONS #
#############################################
# invoked via `$to_host_path_cmd ARG'
#
# In each case, ARG is the path to be converted from $build to $host format.
# The result will be available in $func_to_host_path_result.
#
# Path separators are also converted from $build format to $host format.  If
# ARG begins or ends with a path separator character, it is preserved (but
# converted to $host format) on output.
#
# All path conversion functions are named using the following convention:
#   file name conversion function    : func_convert_file_X_to_Y ()
#   path conversion function         : func_convert_path_X_to_Y ()
# where, for any given $build/$host combination the 'X_to_Y' value is the
# same.  If conversion functions are added for new $build/$host combinations,
# the two new functions must follow this pattern, or func_init_to_host_path_cmd
# will break.


# func_init_to_host_path_cmd
# Ensures that function "pointer" variable $to_host_path_cmd is set to the
# appropriate value, based on the value of $to_host_file_cmd.
to_host_path_cmd=
func_init_to_host_path_cmd ()
{
  $opt_debug
  if test -z "$to_host_path_cmd"; then
    # Derive the path converter name from the file converter name
    # (func_convert_file_X_to_Y -> func_convert_path_X_to_Y).
    func_stripname 'func_convert_file_' '' "$to_host_file_cmd"
    to_host_path_cmd="func_convert_path_${func_stripname_result}"
  fi
}


# func_to_host_path ARG
# Converts the path ARG from $build format to $host format. Return result
# in func_to_host_path_result.
func_to_host_path ()
{
  $opt_debug
  func_init_to_host_path_cmd
  $to_host_path_cmd "$1"
}
# end func_to_host_path


# func_convert_path_noop ARG
# Copy ARG to func_to_host_path_result.
func_convert_path_noop ()
{
  func_to_host_path_result="$1"
}
# end func_convert_path_noop


# func_convert_path_msys_to_w32 ARG
# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic
# conversion to w32 is not available inside the cwrapper.  Returns result in
# func_to_host_path_result.
func_convert_path_msys_to_w32 ()
{
  $opt_debug
  func_to_host_path_result="$1"
  if test -n "$1"; then
    # Remove leading and trailing path separator characters from ARG.  MSYS
    # behavior is inconsistent here; cygpath turns them into '.;' and ';.';
    # and winepath ignores them completely.
    func_stripname : : "$1"
    func_to_host_path_tmp1=$func_stripname_result
    func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
    func_to_host_path_result="$func_convert_core_msys_to_w32_result"
    func_convert_path_check : ";" \
      "$func_to_host_path_tmp1" "$func_to_host_path_result"
    # Re-attach any separators stripped above, in w32 (';') form.
    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
  fi
}
# end func_convert_path_msys_to_w32


# func_convert_path_cygwin_to_w32 ARG
# Convert path ARG from Cygwin to w32 format.  Returns result in
# func_to_host_file_result.
func_convert_path_cygwin_to_w32 ()
{
  $opt_debug
  func_to_host_path_result="$1"
  if test -n "$1"; then
    # See func_convert_path_msys_to_w32:
    func_stripname : : "$1"
    func_to_host_path_tmp1=$func_stripname_result
    func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"`
    func_convert_path_check : ";" \
      "$func_to_host_path_tmp1" "$func_to_host_path_result"
    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
  fi
}
# end func_convert_path_cygwin_to_w32


# func_convert_path_nix_to_w32 ARG
# Convert path ARG from *nix to w32 format.  Requires a wine environment and
# a working winepath.  Returns result in func_to_host_file_result.
func_convert_path_nix_to_w32 ()
{
  $opt_debug
  func_to_host_path_result="$1"
  if test -n "$1"; then
    # See func_convert_path_msys_to_w32:
    func_stripname : : "$1"
    func_to_host_path_tmp1=$func_stripname_result
    func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
    func_to_host_path_result="$func_convert_core_path_wine_to_w32_result"
    func_convert_path_check : ";" \
      "$func_to_host_path_tmp1" "$func_to_host_path_result"
    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
  fi
}
# end func_convert_path_nix_to_w32


# func_convert_path_msys_to_cygwin ARG
# Convert path ARG from MSYS to Cygwin format.  Requires LT_CYGPATH set.
# Returns result in func_to_host_file_result.
# (documented above) Convert path ARG from MSYS to Cygwin format.
func_convert_path_msys_to_cygwin ()
{
  $opt_debug
  func_to_host_path_result="$1"
  if test -n "$1"; then
    # See func_convert_path_msys_to_w32:
    func_stripname : : "$1"
    func_to_host_path_tmp1=$func_stripname_result
    func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
    func_cygpath -u -p "$func_convert_core_msys_to_w32_result"
    func_to_host_path_result="$func_cygpath_result"
    func_convert_path_check : : \
      "$func_to_host_path_tmp1" "$func_to_host_path_result"
    func_convert_path_front_back_pathsep ":*" "*:" : "$1"
  fi
}
# end func_convert_path_msys_to_cygwin


# func_convert_path_nix_to_cygwin ARG
# Convert path ARG from *nix to Cygwin format.  Requires Cygwin installed in a
# a wine environment, working winepath, and LT_CYGPATH set.  Returns result in
# func_to_host_file_result.
func_convert_path_nix_to_cygwin ()
{
  $opt_debug
  func_to_host_path_result="$1"
  if test -n "$1"; then
    # Remove leading and trailing path separator characters from
    # ARG. msys behavior is inconsistent here, cygpath turns them
    # into '.;' and ';.', and winepath ignores them completely.
    func_stripname : : "$1"
    func_to_host_path_tmp1=$func_stripname_result
    func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
    func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result"
    func_to_host_path_result="$func_cygpath_result"
    func_convert_path_check : : \
      "$func_to_host_path_tmp1" "$func_to_host_path_result"
    func_convert_path_front_back_pathsep ":*" "*:" : "$1"
  fi
}
# end func_convert_path_nix_to_cygwin


# func_mode_compile arg...
# Parse COMPILE-COMMAND and SOURCEFILE, then build the PIC object (in
# $objdir) and/or the non-PIC object, and write the .lo libtool object file.
func_mode_compile ()
{
    $opt_debug
    # Get the compilation command and the source file.
    base_compile=
    srcfile="$nonopt"  #  always keep a non-empty value in "srcfile"
    suppress_opt=yes
    suppress_output=
    arg_mode=normal
    libobj=
    later=
    pie_flag=

    for arg
    do
      case $arg_mode in
      arg  )
	# do not "continue".  Instead, add this to base_compile
	lastarg="$arg"
	arg_mode=normal
	;;

      target )
	libobj="$arg"
	arg_mode=normal
	continue
	;;

      normal )
	# Accept any command-line options.
	case $arg in
	-o)
	  test -n "$libobj" && \
	    func_fatal_error "you cannot specify \`-o' more than once"
	  arg_mode=target
	  continue
	  ;;

	-pie | -fpie | -fPIE)
          func_append pie_flag " $arg"
	  continue
	  ;;

	-shared | -static | -prefer-pic | -prefer-non-pic)
	  func_append later " $arg"
	  continue
	  ;;

	-no-suppress)
	  suppress_opt=no
	  continue
	  ;;

	-Xcompiler)
	  arg_mode=arg  #  the next one goes into the "base_compile" arg list
	  continue      #  The current "srcfile" will either be retained or
	  ;;            #  replaced later.  I would guess that would be a bug.

	-Wc,*)
	  # Split the comma-separated flag list and quote each piece.
	  func_stripname '-Wc,' '' "$arg"
	  args=$func_stripname_result
	  lastarg=
	  save_ifs="$IFS"; IFS=','
	  for arg in $args; do
	    IFS="$save_ifs"
	    func_append_quoted lastarg "$arg"
	  done
	  IFS="$save_ifs"
	  func_stripname ' ' '' "$lastarg"
	  lastarg=$func_stripname_result

	  # Add the arguments to base_compile.
	  func_append base_compile " $lastarg"
	  continue
	  ;;

	*)
	  # Accept the current argument as the source file.
	  # The previous "srcfile" becomes the current argument.
	  #
	  lastarg="$srcfile"
	  srcfile="$arg"
	  ;;
	esac  #  case $arg
	;;
      esac    #  case $arg_mode

      # Aesthetically quote the previous argument.
      func_append_quoted base_compile "$lastarg"
    done # for arg

    case $arg_mode in
    arg)
      func_fatal_error "you must specify an argument for -Xcompile"
      ;;
    target)
      func_fatal_error "you must specify a target with \`-o'"
      ;;
    *)
      # Get the name of the library object.
      test -z "$libobj" && {
	func_basename "$srcfile"
	libobj="$func_basename_result"
      }
      ;;
    esac

    # Recognize several different file suffixes.
    # If the user specifies -o file.o, it is replaced with file.lo
    case $libobj in
    *.[cCFSifmso] | \
    *.ada | *.adb | *.ads | *.asm | \
    *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \
    *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup)
      func_xform "$libobj"
      libobj=$func_xform_result
      ;;
    esac

    case $libobj in
    *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;;
    *)
      func_fatal_error "cannot determine name of library object from \`$libobj'"
      ;;
    esac

    func_infer_tag $base_compile

    # Process options deferred above (-shared/-static/-prefer-*).
    for arg in $later; do
      case $arg in
      -shared)
	test "$build_libtool_libs" != yes && \
	  func_fatal_configuration "can not build a shared library"
	build_old_libs=no
	continue
	;;

      -static)
	build_libtool_libs=no
	build_old_libs=yes
	continue
	;;

      -prefer-pic)
	pic_mode=yes
	continue
	;;

      -prefer-non-pic)
	pic_mode=no
	continue
	;;
      esac
    done

    func_quote_for_eval "$libobj"
    test "X$libobj" != "X$func_quote_for_eval_result" \
      && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \
      && func_warning "libobj name \`$libobj' may not contain shell special characters."
    func_dirname_and_basename "$obj" "/" ""
    objname="$func_basename_result"
    xdir="$func_dirname_result"
    lobj=${xdir}$objdir/$objname

    test -z "$base_compile" && \
      func_fatal_help "you must specify a compilation command"

    # Delete any leftover library objects.
    if test "$build_old_libs" = yes; then
      removelist="$obj $lobj $libobj ${libobj}T"
    else
      removelist="$lobj $libobj ${libobj}T"
    fi

    # On Cygwin there's no "real" PIC flag so we must build both object types
    case $host_os in
    cygwin* | mingw* | pw32* | os2* | cegcc*)
      pic_mode=default
      ;;
    esac
    if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then
      # non-PIC code in shared libraries is not supported
      pic_mode=default
    fi

    # Calculate the filename of the output object if compiler does
    # not support -o with -c
    if test "$compiler_c_o" = no; then
      output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.${objext}
      lockfile="$output_obj.lock"
    else
      output_obj=
      need_locks=no
      lockfile=
    fi

    # Lock this critical section if it is needed
    # We use this script file to make the link, it avoids creating a new file
    if test "$need_locks" = yes; then
      until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do
	func_echo "Waiting for $lockfile to be removed"
	sleep 2
      done
    elif test "$need_locks" = warn; then
      if test -f "$lockfile"; then
	$ECHO "\
*** ERROR, $lockfile exists and contains:
`cat $lockfile 2>/dev/null`

This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together.  If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."

	$opt_dry_run || $RM $removelist
	exit $EXIT_FAILURE
      fi
      func_append removelist " $output_obj"
      $ECHO "$srcfile" > "$lockfile"
    fi

    $opt_dry_run || $RM $removelist
    func_append removelist " $lockfile"
    trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15

    func_to_tool_file "$srcfile" func_convert_file_msys_to_w32
    srcfile=$func_to_tool_file_result
    func_quote_for_eval "$srcfile"
    qsrcfile=$func_quote_for_eval_result

    # Only build a PIC object if we are building libtool libraries.
    if test "$build_libtool_libs" = yes; then
      # Without this assignment, base_compile gets emptied.
      fbsd_hideous_sh_bug=$base_compile

      if test "$pic_mode" != no; then
	command="$base_compile $qsrcfile $pic_flag"
      else
	# Don't build PIC code
	command="$base_compile $qsrcfile"
      fi

      func_mkdir_p "$xdir$objdir"

      if test -z "$output_obj"; then
	# Place PIC objects in $objdir
	func_append command " -o $lobj"
      fi

      func_show_eval_locale "$command" \
          'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE'

      if test "$need_locks" = warn &&
	 test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
	$ECHO "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`

but it should contain:
$srcfile

This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together.  If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."

	$opt_dry_run || $RM $removelist
	exit $EXIT_FAILURE
      fi

      # Just move the object if needed, then go on to compile the next one
      if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then
	func_show_eval '$MV "$output_obj" "$lobj"' \
	  'error=$?; $opt_dry_run || $RM $removelist; exit $error'
      fi

      # Allow error messages only from the first compilation.
      if test "$suppress_opt" = yes; then
	suppress_output=' >/dev/null 2>&1'
      fi
    fi

    # Only build a position-dependent object if we build old libraries.
    if test "$build_old_libs" = yes; then
      if test "$pic_mode" != yes; then
	# Don't build PIC code
	command="$base_compile $qsrcfile$pie_flag"
      else
	command="$base_compile $qsrcfile $pic_flag"
      fi
      if test "$compiler_c_o" = yes; then
	func_append command " -o $obj"
      fi

      # Suppress compiler output if we already did a PIC compilation.
      func_append command "$suppress_output"
      func_show_eval_locale "$command" \
        '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE'

      if test "$need_locks" = warn &&
	 test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
	$ECHO "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`

but it should contain:
$srcfile

This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together.  If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."

	$opt_dry_run || $RM $removelist
	exit $EXIT_FAILURE
      fi

      # Just move the object if needed
      if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then
	func_show_eval '$MV "$output_obj" "$obj"' \
	  'error=$?; $opt_dry_run || $RM $removelist; exit $error'
      fi
    fi

    $opt_dry_run || {
      func_write_libtool_object "$libobj" "$objdir/$objname" "$objname"

      # Unlock the critical section if it was locked
      if test "$need_locks" != no; then
	removelist=$lockfile
        $RM "$lockfile"
      fi
    }

    exit $EXIT_SUCCESS
}

$opt_help || {
  test "$opt_mode" = compile && func_mode_compile ${1+"$@"}
}

# func_mode_help
# Display the help text for the current $opt_mode (or generic help).
func_mode_help ()
{
    # We need to display help for each of the modes.
    case $opt_mode in
      "")
        # Generic help is extracted from the usage comments
        # at the start of this file.
        func_help
        ;;

      clean)
        $ECHO \
"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE...

Remove files from the build directory.

RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm').  RM-OPTIONS are options (such as \`-f') to be passed
to RM.

If FILE is a libtool library, object or program, all the files associated
with it are deleted. Otherwise, only FILE itself is deleted using RM."
        ;;

      compile)
      $ECHO \
"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE

Compile a source file into a libtool library object.

This mode accepts the following additional options:

  -o OUTPUT-FILE    set the output file name to OUTPUT-FILE
  -no-suppress      do not suppress compiler output for multiple passes
  -prefer-pic       try to build PIC objects only
  -prefer-non-pic   try to build non-PIC objects only
  -shared           do not build a \`.o' file suitable for static linking
  -static           only build a \`.o' file suitable for static linking
  -Wc,FLAG          pass FLAG directly to the compiler

COMPILE-COMMAND is a command to be used in creating a \`standard' object file
from the given SOURCEFILE.

The output file name is determined by removing the directory component from
SOURCEFILE, then substituting the C source code suffix \`.c' with the
library object suffix, \`.lo'."
        ;;

      execute)
        $ECHO \
"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]...

Automatically set library path, then run a program.

This mode accepts the following additional options:

  -dlopen FILE      add the directory containing FILE to the library path

This mode sets the library path environment variable according to \`-dlopen'
flags.

If any of the ARGS are libtool executable wrappers, then they are translated
into their corresponding uninstalled binary, and any of their required library
directories are added to the library path.

Then, COMMAND is executed, with ARGS as arguments."
        ;;

      finish)
        $ECHO \
"Usage: $progname [OPTION]... --mode=finish [LIBDIR]...

Complete the installation of libtool libraries.

Each LIBDIR is a directory that contains libtool libraries.

The commands that this mode executes may require superuser privileges.  Use
the \`--dry-run' option if you just want to see what would be executed."
        ;;

      install)
        $ECHO \
"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND...

Install executables or libraries.

INSTALL-COMMAND is the installation command.  The first component should be
either the \`install' or \`cp' program.

The following components of INSTALL-COMMAND are treated specially:

  -inst-prefix-dir PREFIX-DIR  Use PREFIX-DIR as a staging area for installation

The rest of the components are interpreted as arguments to that command (only
BSD-compatible install options are recognized)."
        ;;

      link)
        $ECHO \
"Usage: $progname [OPTION]... --mode=link LINK-COMMAND...

Link object files or libraries together to form another library, or to
create an executable program.

LINK-COMMAND is a command using the C compiler that you would use to create
a program from several object files.

The following components of LINK-COMMAND are treated specially:

  -all-static       do not do any dynamic linking at all
  -avoid-version    do not add a version suffix if possible
  -bindir BINDIR    specify path to binaries directory (for systems where
                    libraries must be found in the PATH setting at runtime)
  -dlopen FILE      \`-dlpreopen' FILE if it cannot be dlopened at runtime
  -dlpreopen FILE   link in FILE and add its symbols to lt_preloaded_symbols
  -export-dynamic   allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
  -export-symbols SYMFILE
                    try to export only the symbols listed in SYMFILE
  -export-symbols-regex REGEX
                    try to export only the symbols matching REGEX
  -LLIBDIR          search LIBDIR for required installed libraries
  -lNAME            OUTPUT-FILE requires the installed library libNAME
  -module           build a library that can dlopened
  -no-fast-install  disable the fast-install mode
  -no-install       link a not-installable executable
  -no-undefined     declare that a library does not refer to external symbols
  -o OUTPUT-FILE    create OUTPUT-FILE from the specified objects
  -objectlist FILE  Use a list of object files found in FILE to specify objects
  -precious-files-regex REGEX
                    don't remove output files matching REGEX
  -release RELEASE  specify package release information
  -rpath LIBDIR     the created library will eventually be installed in LIBDIR
  -R[ ]LIBDIR       add LIBDIR to the runtime path of programs and libraries
  -shared           only do dynamic linking of libtool libraries
  -shrext SUFFIX    override the standard shared library file extension
  -static           do not do any dynamic linking of uninstalled libtool
                    libraries
  -static-libtool-libs
                    do not do any dynamic linking of libtool libraries
  -version-info CURRENT[:REVISION[:AGE]]
                    specify library version info [each variable defaults to 0]
  -weak LIBNAME     declare that the target provides the LIBNAME interface
  -Wc,FLAG
  -Xcompiler FLAG   pass linker-specific FLAG directly to the compiler
  -Wl,FLAG
  -Xlinker FLAG     pass linker-specific FLAG directly to the linker
  -XCClinker FLAG   pass link-specific FLAG to the compiler driver (CC)

All other options (arguments beginning with \`-') are ignored.

Every other argument is treated as a filename.  Files ending in \`.la' are
treated as uninstalled libtool libraries, other files are standard or library
object files.

If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
only library objects (\`.lo' files) may be specified, and \`-rpath' is
required, except when creating a convenience library.

If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
using \`ar' and \`ranlib', or on Windows using \`lib'.

If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
is created, otherwise an executable program is created."
        ;;

      uninstall)
        $ECHO \
"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...

Remove libraries from an installation directory.

RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm').  RM-OPTIONS are options (such as \`-f') to be passed
to RM.

If FILE is a libtool library, all the files associated with it are deleted.
Otherwise, only FILE itself is deleted using RM."
        ;;

      *)
        func_fatal_help "invalid operation mode \`$opt_mode'"
        ;;
    esac

    echo
    $ECHO "Try \`$progname --help' for more information about other modes."
}

# Now that we've collected a possible --mode arg, show help if necessary
if $opt_help; then
  if test "$opt_help" = :; then
    func_mode_help
  else
    {
      func_help noexit
      for opt_mode in compile link execute install finish uninstall clean; do
	func_mode_help
      done
    } | sed -n '1p; 2,$s/^Usage:/  or: /p'
    {
      func_help noexit
      for opt_mode in compile link execute install finish uninstall clean; do
	echo
	func_mode_help
      done
    } |
    sed '1d
      /^When reporting/,/^Report/{
	H
	d
      }
      $x
      /information about other modes/d
      /more detailed .*MODE/d
      s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/'
  fi
  exit $?
fi


# func_mode_execute arg...
# Run COMMAND with ARGS after adding the directories of any `-dlopen'ed
# libtool libraries/objects to $shlibpath_var, translating wrapper scripts
# into their real uninstalled binaries.
func_mode_execute ()
{
    $opt_debug
    # The first argument is the command name.
    cmd="$nonopt"
    test -z "$cmd" && \
      func_fatal_help "you must specify a COMMAND"

    # Handle -dlopen flags immediately.
    for file in $opt_dlopen; do
      test -f "$file" \
	|| func_fatal_help "\`$file' is not a file"

      dir=
      case $file in
      *.la)
	func_resolve_sysroot "$file"
	file=$func_resolve_sysroot_result

	# Check to see that this really is a libtool archive.
	func_lalib_unsafe_p "$file" \
	  || func_fatal_help "\`$lib' is not a valid libtool archive"

	# Read the libtool library.
	dlname=
	library_names=
	func_source "$file"

	# Skip this library if it cannot be dlopened.
	if test -z "$dlname"; then
	  # Warn if it was a shared library.
	  test -n "$library_names" && \
	    func_warning "\`$file' was not linked with \`-export-dynamic'"
	  continue
	fi

	func_dirname "$file" "" "."
	dir="$func_dirname_result"

	if test -f "$dir/$objdir/$dlname"; then
	  func_append dir "/$objdir"
	else
	  if test ! -f "$dir/$dlname"; then
	    func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'"
	  fi
	fi
	;;

      *.lo)
	# Just add the directory containing the .lo file.
	func_dirname "$file" "" "."
	dir="$func_dirname_result"
	;;

      *)
	func_warning "\`-dlopen' is ignored for non-libtool libraries and objects"
	continue
	;;
      esac

      # Get the absolute pathname.
      absdir=`cd "$dir" && pwd`
      test -n "$absdir" && dir="$absdir"

      # Now add the directory to shlibpath_var.
      if eval "test -z \"\$$shlibpath_var\""; then
	eval "$shlibpath_var=\"\$dir\""
      else
	eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
      fi
    done

    # This variable tells wrapper scripts just to set shlibpath_var
    # rather than running their programs.
    libtool_execute_magic="$magic"

    # Check if any of the arguments is a wrapper script.
    args=
    for file
    do
      case $file in
      -* | *.la | *.lo ) ;;
      *)
	# Do a test to see if this is really a libtool program.
	if func_ltwrapper_script_p "$file"; then
	  func_source "$file"
	  # Transform arg to wrapped name.
	  file="$progdir/$program"
	elif func_ltwrapper_executable_p "$file"; then
	  func_ltwrapper_scriptname "$file"
	  func_source "$func_ltwrapper_scriptname_result"
	  # Transform arg to wrapped name.
	  file="$progdir/$program"
	fi
	;;
      esac
      # Quote arguments (to preserve shell metacharacters).
      func_append_quoted args "$file"
    done

    if test "X$opt_dry_run" = Xfalse; then
      if test -n "$shlibpath_var"; then
	# Export the shlibpath_var.
	eval "export $shlibpath_var"
      fi

      # Restore saved environment variables
      for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
      do
	eval "if test \"\${save_$lt_var+set}\" = set; then
                $lt_var=\$save_$lt_var; export $lt_var
	      else
		$lt_unset $lt_var
	      fi"
      done

      # Now prepare to actually exec the command.
      exec_cmd="\$cmd$args"
    else
      # Display what would be done.
      if test -n "$shlibpath_var"; then
	eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\""
	echo "export $shlibpath_var"
      fi
      $ECHO "$cmd$args"
      exit $EXIT_SUCCESS
    fi
}

test "$opt_mode" = execute && func_mode_execute ${1+"$@"}


# func_mode_finish arg...
func_mode_finish () { $opt_debug libs= libdirs= admincmds= for opt in "$nonopt" ${1+"$@"} do if test -d "$opt"; then func_append libdirs " $opt" elif test -f "$opt"; then if func_lalib_unsafe_p "$opt"; then func_append libs " $opt" else func_warning "\`$opt' is not a valid libtool archive" fi else func_fatal_error "invalid argument \`$opt'" fi done if test -n "$libs"; then if test -n "$lt_sysroot"; then sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" else sysroot_cmd= fi # Remove sysroot references if $opt_dry_run; then for lib in $libs; do echo "removing references to $lt_sysroot and \`=' prefixes from $lib" done else tmpdir=`func_mktempdir` for lib in $libs; do sed -e "${sysroot_cmd} s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ > $tmpdir/tmp-la mv -f $tmpdir/tmp-la $lib done ${RM}r "$tmpdir" fi fi if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then for libdir in $libdirs; do if test -n "$finish_cmds"; then # Do each command in the finish commands. func_execute_cmds "$finish_cmds" 'admincmds="$admincmds '"$cmd"'"' fi if test -n "$finish_eval"; then # Do the single finish_eval. eval cmds=\"$finish_eval\" $opt_dry_run || eval "$cmds" || func_append admincmds " $cmds" fi done fi # Exit here if they wanted silent mode. 
$opt_silent && exit $EXIT_SUCCESS if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then echo "----------------------------------------------------------------------" echo "Libraries have been installed in:" for libdir in $libdirs; do $ECHO " $libdir" done echo echo "If you ever happen to want to link against installed libraries" echo "in a given directory, LIBDIR, you must either use libtool, and" echo "specify the full pathname of the library, or use the \`-LLIBDIR'" echo "flag during linking and do at least one of the following:" if test -n "$shlibpath_var"; then echo " - add LIBDIR to the \`$shlibpath_var' environment variable" echo " during execution" fi if test -n "$runpath_var"; then echo " - add LIBDIR to the \`$runpath_var' environment variable" echo " during linking" fi if test -n "$hardcode_libdir_flag_spec"; then libdir=LIBDIR eval flag=\"$hardcode_libdir_flag_spec\" $ECHO " - use the \`$flag' linker flag" fi if test -n "$admincmds"; then $ECHO " - have your system administrator run these commands:$admincmds" fi if test -f /etc/ld.so.conf; then echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" fi echo echo "See any operating system documentation about shared libraries for" case $host in solaris2.[6789]|solaris2.1[0-9]) echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" echo "pages." ;; *) echo "more information, such as the ld(1) and ld.so(8) manual pages." ;; esac echo "----------------------------------------------------------------------" fi exit $EXIT_SUCCESS } test "$opt_mode" = finish && func_mode_finish ${1+"$@"} # func_mode_install arg... func_mode_install () { $opt_debug # There may be an optional sh(1) argument at the beginning of # install_prog (especially on Windows NT). if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || # Allow the use of GNU shtool's install command. case $nonopt in *shtool*) :;; *) false;; esac; then # Aesthetically quote it. 
func_quote_for_eval "$nonopt" install_prog="$func_quote_for_eval_result " arg=$1 shift else install_prog= arg=$nonopt fi # The real first argument should be the name of the installation program. # Aesthetically quote it. func_quote_for_eval "$arg" func_append install_prog "$func_quote_for_eval_result" install_shared_prog=$install_prog case " $install_prog " in *[\\\ /]cp\ *) install_cp=: ;; *) install_cp=false ;; esac # We need to accept at least all the BSD install flags. dest= files= opts= prev= install_type= isdir=no stripme= no_mode=: for arg do arg2= if test -n "$dest"; then func_append files " $dest" dest=$arg continue fi case $arg in -d) isdir=yes ;; -f) if $install_cp; then :; else prev=$arg fi ;; -g | -m | -o) prev=$arg ;; -s) stripme=" -s" continue ;; -*) ;; *) # If the previous option needed an argument, then skip it. if test -n "$prev"; then if test "x$prev" = x-m && test -n "$install_override_mode"; then arg2=$install_override_mode no_mode=false fi prev= else dest=$arg continue fi ;; esac # Aesthetically quote the argument. func_quote_for_eval "$arg" func_append install_prog " $func_quote_for_eval_result" if test -n "$arg2"; then func_quote_for_eval "$arg2" fi func_append install_shared_prog " $func_quote_for_eval_result" done test -z "$install_prog" && \ func_fatal_help "you must specify an install program" test -n "$prev" && \ func_fatal_help "the \`$prev' option requires an argument" if test -n "$install_override_mode" && $no_mode; then if $install_cp; then :; else func_quote_for_eval "$install_override_mode" func_append install_shared_prog " -m $func_quote_for_eval_result" fi fi if test -z "$files"; then if test -z "$dest"; then func_fatal_help "no file or destination specified" else func_fatal_help "you must specify a destination" fi fi # Strip any trailing slash from the destination. func_stripname '' '/' "$dest" dest=$func_stripname_result # Check to see that the destination is a directory. 
test -d "$dest" && isdir=yes if test "$isdir" = yes; then destdir="$dest" destname= else func_dirname_and_basename "$dest" "" "." destdir="$func_dirname_result" destname="$func_basename_result" # Not a directory, so check to see that there is only one file specified. set dummy $files; shift test "$#" -gt 1 && \ func_fatal_help "\`$dest' is not a directory" fi case $destdir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) for file in $files; do case $file in *.lo) ;; *) func_fatal_help "\`$destdir' must be an absolute directory name" ;; esac done ;; esac # This variable tells wrapper scripts just to set variables rather # than running their programs. libtool_install_magic="$magic" staticlibs= future_libdirs= current_libdirs= for file in $files; do # Do each installation. case $file in *.$libext) # Do the static libraries later. func_append staticlibs " $file" ;; *.la) func_resolve_sysroot "$file" file=$func_resolve_sysroot_result # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$file" \ || func_fatal_help "\`$file' is not a valid libtool archive" library_names= old_library= relink_command= func_source "$file" # Add the libdir to current_libdirs if it is the destination. if test "X$destdir" = "X$libdir"; then case "$current_libdirs " in *" $libdir "*) ;; *) func_append current_libdirs " $libdir" ;; esac else # Note the libdir as a future libdir. case "$future_libdirs " in *" $libdir "*) ;; *) func_append future_libdirs " $libdir" ;; esac fi func_dirname "$file" "/" "" dir="$func_dirname_result" func_append dir "$objdir" if test -n "$relink_command"; then # Determine the prefix the user has applied to our future dir. inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` # Don't allow the user to place us outside of our expected # location b/c this prevents finding dependent libraries that # are installed to the same prefix. 
# At present, this check doesn't affect windows .dll's that # are installed into $libdir/../bin (currently, that works fine) # but it's something to keep an eye on. test "$inst_prefix_dir" = "$destdir" && \ func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir" if test -n "$inst_prefix_dir"; then # Stick the inst_prefix_dir data into the link command. relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` else relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` fi func_warning "relinking \`$file'" func_show_eval "$relink_command" \ 'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"' fi # See the names of the shared library. set dummy $library_names; shift if test -n "$1"; then realname="$1" shift srcname="$realname" test -n "$relink_command" && srcname="$realname"T # Install the shared library and build the symlinks. func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ 'exit $?' tstripme="$stripme" case $host_os in cygwin* | mingw* | pw32* | cegcc*) case $realname in *.dll.a) tstripme="" ;; esac ;; esac if test -n "$tstripme" && test -n "$striplib"; then func_show_eval "$striplib $destdir/$realname" 'exit $?' fi if test "$#" -gt 0; then # Delete the old symlinks, and create new ones. # Try `ln -sf' first, because the `ln' binary might depend on # the symlink we replace! Solaris /bin/ln does not understand -f, # so we also need to try rm && ln -s. for linkname do test "$linkname" != "$realname" \ && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" done fi # Do each command in the postinstall commands. lib="$destdir/$realname" func_execute_cmds "$postinstall_cmds" 'exit $?' fi # Install the pseudo-library for information purposes. 
func_basename "$file" name="$func_basename_result" instname="$dir/$name"i func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' # Maybe install the static library, too. test -n "$old_library" && func_append staticlibs " $dir/$old_library" ;; *.lo) # Install (i.e. copy) a libtool object. # Figure out destination file name, if it wasn't already specified. if test -n "$destname"; then destfile="$destdir/$destname" else func_basename "$file" destfile="$func_basename_result" destfile="$destdir/$destfile" fi # Deduce the name of the destination old-style object file. case $destfile in *.lo) func_lo2o "$destfile" staticdest=$func_lo2o_result ;; *.$objext) staticdest="$destfile" destfile= ;; *) func_fatal_help "cannot copy a libtool object to \`$destfile'" ;; esac # Install the libtool object if requested. test -n "$destfile" && \ func_show_eval "$install_prog $file $destfile" 'exit $?' # Install the old object if enabled. if test "$build_old_libs" = yes; then # Deduce the name of the old-style object file. func_lo2o "$file" staticobj=$func_lo2o_result func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' fi exit $EXIT_SUCCESS ;; *) # Figure out destination file name, if it wasn't already specified. if test -n "$destname"; then destfile="$destdir/$destname" else func_basename "$file" destfile="$func_basename_result" destfile="$destdir/$destfile" fi # If the file is missing, and there is a .exe on the end, strip it # because it is most likely a libtool script we actually want to # install stripped_ext="" case $file in *.exe) if test ! -f "$file"; then func_stripname '' '.exe' "$file" file=$func_stripname_result stripped_ext=".exe" fi ;; esac # Do a test to see if this is really a libtool program. 
case $host in *cygwin* | *mingw*) if func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" wrapper=$func_ltwrapper_scriptname_result else func_stripname '' '.exe' "$file" wrapper=$func_stripname_result fi ;; *) wrapper=$file ;; esac if func_ltwrapper_script_p "$wrapper"; then notinst_deplibs= relink_command= func_source "$wrapper" # Check the variables that should have been set. test -z "$generated_by_libtool_version" && \ func_fatal_error "invalid libtool wrapper script \`$wrapper'" finalize=yes for lib in $notinst_deplibs; do # Check to see that each library is installed. libdir= if test -f "$lib"; then func_source "$lib" fi libfile="$libdir/"`$ECHO "$lib" | $SED 's%^.*/%%g'` ### testsuite: skip nested quoting test if test -n "$libdir" && test ! -f "$libfile"; then func_warning "\`$lib' has not been installed in \`$libdir'" finalize=no fi done relink_command= func_source "$wrapper" outputname= if test "$fast_install" = no && test -n "$relink_command"; then $opt_dry_run || { if test "$finalize" = yes; then tmpdir=`func_mktempdir` func_basename "$file$stripped_ext" file="$func_basename_result" outputname="$tmpdir/$file" # Replace the output file specification. relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` $opt_silent || { func_quote_for_expand "$relink_command" eval "func_echo $func_quote_for_expand_result" } if eval "$relink_command"; then : else func_error "error: relink \`$file' with the above command before installing it" $opt_dry_run || ${RM}r "$tmpdir" continue fi file="$outputname" else func_warning "cannot relink \`$file'" fi } else # Install the binary that we compiled earlier. 
file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` fi fi # remove .exe since cygwin /usr/bin/install will append another # one anyway case $install_prog,$host in */usr/bin/install*,*cygwin*) case $file:$destfile in *.exe:*.exe) # this is ok ;; *.exe:*) destfile=$destfile.exe ;; *:*.exe) func_stripname '' '.exe' "$destfile" destfile=$func_stripname_result ;; esac ;; esac func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' $opt_dry_run || if test -n "$outputname"; then ${RM}r "$tmpdir" fi ;; esac done for file in $staticlibs; do func_basename "$file" name="$func_basename_result" # Set up the ranlib parameters. oldlib="$destdir/$name" func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 tool_oldlib=$func_to_tool_file_result func_show_eval "$install_prog \$file \$oldlib" 'exit $?' if test -n "$stripme" && test -n "$old_striplib"; then func_show_eval "$old_striplib $tool_oldlib" 'exit $?' fi # Do each command in the postinstall commands. func_execute_cmds "$old_postinstall_cmds" 'exit $?' done test -n "$future_libdirs" && \ func_warning "remember to run \`$progname --finish$future_libdirs'" if test -n "$current_libdirs"; then # Maybe just do a dry run. $opt_dry_run && current_libdirs=" -n$current_libdirs" exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' else exit $EXIT_SUCCESS fi } test "$opt_mode" = install && func_mode_install ${1+"$@"} # func_generate_dlsyms outputname originator pic_p # Extract symbols from dlprefiles and create ${outputname}S.o with # a dlpreopen symbol table. 
func_generate_dlsyms () { $opt_debug my_outputname="$1" my_originator="$2" my_pic_p="${3-no}" my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'` my_dlsyms= if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then if test -n "$NM" && test -n "$global_symbol_pipe"; then my_dlsyms="${my_outputname}S.c" else func_error "not configured to extract global symbols from dlpreopened files" fi fi if test -n "$my_dlsyms"; then case $my_dlsyms in "") ;; *.c) # Discover the nlist of each of the dlfiles. nlist="$output_objdir/${my_outputname}.nm" func_show_eval "$RM $nlist ${nlist}S ${nlist}T" # Parse the name list into a source file. func_verbose "creating $output_objdir/$my_dlsyms" $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ /* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */ /* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */ #ifdef __cplusplus extern \"C\" { #endif #if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) #pragma GCC diagnostic ignored \"-Wstrict-prototypes\" #endif /* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ #if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) /* DATA imports from DLLs on WIN32 con't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. */ # define LT_DLSYM_CONST #elif defined(__osf__) /* This system does not cope well with relocations in const data. */ # define LT_DLSYM_CONST #else # define LT_DLSYM_CONST const #endif /* External symbol declarations for the compiler. */\ " if test "$dlself" = yes; then func_verbose "generating symbol list for \`$output'" $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" # Add our own program objects to the symbol list. 
progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` for progfile in $progfiles; do func_to_tool_file "$progfile" func_convert_file_msys_to_w32 func_verbose "extracting global C symbols from \`$func_to_tool_file_result'" $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" done if test -n "$exclude_expsyms"; then $opt_dry_run || { eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' } fi if test -n "$export_symbols_regex"; then $opt_dry_run || { eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' } fi # Prepare the list of exported symbols if test -z "$export_symbols"; then export_symbols="$output_objdir/$outputname.exp" $opt_dry_run || { $RM $export_symbols eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' case $host in *cygwin* | *mingw* | *cegcc* ) eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' ;; esac } else $opt_dry_run || { eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' case $host in *cygwin* | *mingw* | *cegcc* ) eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' ;; esac } fi fi for dlprefile in $dlprefiles; do func_verbose "extracting global C symbols from \`$dlprefile'" func_basename "$dlprefile" name="$func_basename_result" case $host in *cygwin* | *mingw* | *cegcc* ) # if an import library, we need to obtain dlname if func_win32_import_lib_p "$dlprefile"; then func_tr_sh "$dlprefile" eval "curr_lafile=\$libfile_$func_tr_sh_result" dlprefile_dlbasename="" if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then # Use subshell, to avoid clobbering current variable 
values dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` if test -n "$dlprefile_dlname" ; then func_basename "$dlprefile_dlname" dlprefile_dlbasename="$func_basename_result" else # no lafile. user explicitly requested -dlpreopen . $sharedlib_from_linklib_cmd "$dlprefile" dlprefile_dlbasename=$sharedlib_from_linklib_result fi fi $opt_dry_run || { if test -n "$dlprefile_dlbasename" ; then eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' else func_warning "Could not compute DLL name from $name" eval '$ECHO ": $name " >> "$nlist"' fi func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" } else # not an import lib $opt_dry_run || { eval '$ECHO ": $name " >> "$nlist"' func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" } fi ;; *) $opt_dry_run || { eval '$ECHO ": $name " >> "$nlist"' func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" } ;; esac done $opt_dry_run || { # Make sure we have at least an empty file. test -f "$nlist" || : > "$nlist" if test -n "$exclude_expsyms"; then $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T $MV "$nlist"T "$nlist" fi # Try sorting and uniquifying the output. if $GREP -v "^: " < "$nlist" | if sort -k 3 /dev/null 2>&1; then sort -k 3 else sort +2 fi | uniq > "$nlist"S; then : else $GREP -v "^: " < "$nlist" > "$nlist"S fi if test -f "$nlist"S; then eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' else echo '/* NONE */' >> "$output_objdir/$my_dlsyms" fi echo >> "$output_objdir/$my_dlsyms" "\ /* The mapping between symbol names and symbols. 
*/ typedef struct { const char *name; void *address; } lt_dlsymlist; extern LT_DLSYM_CONST lt_dlsymlist lt_${my_prefix}_LTX_preloaded_symbols[]; LT_DLSYM_CONST lt_dlsymlist lt_${my_prefix}_LTX_preloaded_symbols[] = {\ { \"$my_originator\", (void *) 0 }," case $need_lib_prefix in no) eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" ;; *) eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" ;; esac echo >> "$output_objdir/$my_dlsyms" "\ {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt_${my_prefix}_LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif\ " } # !$opt_dry_run pic_flag_for_symtable= case "$compile_command " in *" -static "*) ;; *) case $host in # compiling the symbol table file with pic_flag works around # a FreeBSD bug that causes programs to crash when -lm is # linked before any other PIC object. But we must not use # pic_flag when linking with -static. The problem exists in # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; *-*-hpux*) pic_flag_for_symtable=" $pic_flag" ;; *) if test "X$my_pic_p" != Xno; then pic_flag_for_symtable=" $pic_flag" fi ;; esac ;; esac symtab_cflags= for arg in $LTCFLAGS; do case $arg in -pie | -fpie | -fPIE) ;; *) func_append symtab_cflags " $arg" ;; esac done # Now compile the dynamic symbol file. func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' # Clean up the generated files. func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"' # Transform the symbol file into the correct name. 
symfileobj="$output_objdir/${my_outputname}S.$objext" case $host in *cygwin* | *mingw* | *cegcc* ) if test -f "$output_objdir/$my_outputname.def"; then compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` else compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` fi ;; *) compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` ;; esac ;; *) func_fatal_error "unknown suffix for \`$my_dlsyms'" ;; esac else # We keep going just in case the user didn't refer to # lt_preloaded_symbols. The linker will fail if global_symbol_pipe # really was required. # Nullify the symbol file. compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` fi } # func_win32_libid arg # return the library type of file 'arg' # # Need a lot of goo to handle *both* DLLs and import libs # Has to be a shell function in order to 'eat' the argument # that is supplied when $file_magic_command is called. # Despite the name, also deal with 64 bit binaries. func_win32_libid () { $opt_debug win32_libid_type="unknown" win32_fileres=`file -L $1 2>/dev/null` case $win32_fileres in *ar\ archive\ import\ library*) # definitely import win32_libid_type="x86 archive import" ;; *ar\ archive*) # could be an import, or static # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. 
if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then func_to_tool_file "$1" func_convert_file_msys_to_w32 win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | $SED -n -e ' 1,100{ / I /{ s,.*,import, p q } }'` case $win32_nmres in import*) win32_libid_type="x86 archive import";; *) win32_libid_type="x86 archive static";; esac fi ;; *DLL*) win32_libid_type="x86 DLL" ;; *executable*) # but shell scripts are "executable" too... case $win32_fileres in *MS\ Windows\ PE\ Intel*) win32_libid_type="x86 DLL" ;; esac ;; esac $ECHO "$win32_libid_type" } # func_cygming_dll_for_implib ARG # # Platform-specific function to extract the # name of the DLL associated with the specified # import library ARG. # Invoked by eval'ing the libtool variable # $sharedlib_from_linklib_cmd # Result is available in the variable # $sharedlib_from_linklib_result func_cygming_dll_for_implib () { $opt_debug sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` } # func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs # # The is the core of a fallback implementation of a # platform-specific function to extract the name of the # DLL associated with the specified import library LIBNAME. # # SECTION_NAME is either .idata$6 or .idata$7, depending # on the platform and compiler that created the implib. # # Echos the name of the DLL associated with the # specified import library. 
func_cygming_dll_for_implib_fallback_core () { $opt_debug match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` $OBJDUMP -s --section "$1" "$2" 2>/dev/null | $SED '/^Contents of section '"$match_literal"':/{ # Place marker at beginning of archive member dllname section s/.*/====MARK====/ p d } # These lines can sometimes be longer than 43 characters, but # are always uninteresting /:[ ]*file format pe[i]\{,1\}-/d /^In archive [^:]*:/d # Ensure marker is printed /^====MARK====/p # Remove all lines with less than 43 characters /^.\{43\}/!d # From remaining lines, remove first 43 characters s/^.\{43\}//' | $SED -n ' # Join marker and all lines until next marker into a single line /^====MARK====/ b para H $ b para b :para x s/\n//g # Remove the marker s/^====MARK====// # Remove trailing dots and whitespace s/[\. \t]*$// # Print /./p' | # we now have a list, one entry per line, of the stringified # contents of the appropriate section of all members of the # archive which possess that section. Heuristic: eliminate # all those which have a first or second character that is # a '.' (that is, objdump's representation of an unprintable # character.) This should work for all archives with less than # 0x302f exports -- but will fail for DLLs whose name actually # begins with a literal '.' or a single character followed by # a '.'. # # Of those that remain, print the first one. $SED -e '/^\./d;/^.\./d;q' } # func_cygming_gnu_implib_p ARG # This predicate returns with zero status (TRUE) if # ARG is a GNU/binutils-style import library. Returns # with nonzero status (FALSE) otherwise. 
func_cygming_gnu_implib_p () { $opt_debug func_to_tool_file "$1" func_convert_file_msys_to_w32 func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` test -n "$func_cygming_gnu_implib_tmp" } # func_cygming_ms_implib_p ARG # This predicate returns with zero status (TRUE) if # ARG is an MS-style import library. Returns # with nonzero status (FALSE) otherwise. func_cygming_ms_implib_p () { $opt_debug func_to_tool_file "$1" func_convert_file_msys_to_w32 func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` test -n "$func_cygming_ms_implib_tmp" } # func_cygming_dll_for_implib_fallback ARG # Platform-specific function to extract the # name of the DLL associated with the specified # import library ARG. # # This fallback implementation is for use when $DLLTOOL # does not support the --identify-strict option. # Invoked by eval'ing the libtool variable # $sharedlib_from_linklib_cmd # Result is available in the variable # $sharedlib_from_linklib_result func_cygming_dll_for_implib_fallback () { $opt_debug if func_cygming_gnu_implib_p "$1" ; then # binutils import library sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` elif func_cygming_ms_implib_p "$1" ; then # ms-generated import library sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` else # unknown sharedlib_from_linklib_result="" fi } # func_extract_an_archive dir oldlib func_extract_an_archive () { $opt_debug f_ex_an_ar_dir="$1"; shift f_ex_an_ar_oldlib="$1" if test "$lock_old_archive_extraction" = yes; then lockfile=$f_ex_an_ar_oldlib.lock until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do func_echo "Waiting for $lockfile to be removed" sleep 2 done fi func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ 'stat=$?; rm -f "$lockfile"; 
exit $stat' if test "$lock_old_archive_extraction" = yes; then $opt_dry_run || rm -f "$lockfile" fi if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then : else func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" fi } # func_extract_archives gentop oldlib ... func_extract_archives () { $opt_debug my_gentop="$1"; shift my_oldlibs=${1+"$@"} my_oldobjs="" my_xlib="" my_xabs="" my_xdir="" for my_xlib in $my_oldlibs; do # Extract the objects. case $my_xlib in [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; *) my_xabs=`pwd`"/$my_xlib" ;; esac func_basename "$my_xlib" my_xlib="$func_basename_result" my_xlib_u=$my_xlib while :; do case " $extracted_archives " in *" $my_xlib_u "*) func_arith $extracted_serial + 1 extracted_serial=$func_arith_result my_xlib_u=lt$extracted_serial-$my_xlib ;; *) break ;; esac done extracted_archives="$extracted_archives $my_xlib_u" my_xdir="$my_gentop/$my_xlib_u" func_mkdir_p "$my_xdir" case $host in *-darwin*) func_verbose "Extracting $my_xabs" # Do not bother doing anything if just a dry run $opt_dry_run || { darwin_orig_dir=`pwd` cd $my_xdir || exit $? 
darwin_archive=$my_xabs darwin_curdir=`pwd` darwin_base_archive=`basename "$darwin_archive"` darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` if test -n "$darwin_arches"; then darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` darwin_arch= func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" for darwin_arch in $darwin_arches ; do func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}" $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" func_extract_an_archive "`pwd`" "${darwin_base_archive}" cd "$darwin_curdir" $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" done # $darwin_arches ## Okay now we've a bunch of thin objects, gotta fatten them up :) darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u` darwin_file= darwin_files= for darwin_file in $darwin_filelist; do darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` $LIPO -create -output "$darwin_file" $darwin_files done # $darwin_filelist $RM -rf unfat-$$ cd "$darwin_orig_dir" else cd $darwin_orig_dir func_extract_an_archive "$my_xdir" "$my_xabs" fi # $darwin_arches } # !$opt_dry_run ;; *) func_extract_an_archive "$my_xdir" "$my_xabs" ;; esac my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` done func_extract_archives_result="$my_oldobjs" } # func_emit_wrapper [arg=no] # # Emit a libtool wrapper script on stdout. # Don't directly open a file because we may want to # incorporate the script contents within a cygwin/mingw # wrapper executable. Must ONLY be called from within # func_mode_link because it depends on a number of variables # set therein. # # ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR # variable will take. 
If 'yes', then the emitted script # will assume that the directory in which it is stored is # the $objdir directory. This is a cygwin/mingw-specific # behavior. func_emit_wrapper () { func_emit_wrapper_arg1=${1-no} $ECHO "\ #! $SHELL # $output - temporary wrapper script for $objdir/$outputname # Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION # # The $output program cannot be directly executed until all the libtool # libraries that it depends on are installed. # # This wrapper script should never be moved out of the build directory. # If it is, it will not operate correctly. # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. sed_quote_subst='$sed_quote_subst' # Be Bourne compatible if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac fi BIN_SH=xpg4; export BIN_SH # for Tru64 DUALCASE=1; export DUALCASE # for MKS sh # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH relink_command=\"$relink_command\" # This environment variable determines our operation mode. if test \"\$libtool_install_magic\" = \"$magic\"; then # install mode needs the following variables: generated_by_libtool_version='$macro_version' notinst_deplibs='$notinst_deplibs' else # When we are sourced in execute mode, \$file and \$ECHO are already set. if test \"\$libtool_execute_magic\" != \"$magic\"; then file=\"\$0\"" qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` $ECHO "\ # A function that is used when there is no print builtin or printf. 
func_fallback_echo () { eval 'cat <<_LTECHO_EOF \$1 _LTECHO_EOF' } ECHO=\"$qECHO\" fi # Very basic option parsing. These options are (a) specific to # the libtool wrapper, (b) are identical between the wrapper # /script/ and the wrapper /executable/ which is used only on # windows platforms, and (c) all begin with the string "--lt-" # (application programs are unlikely to have options which match # this pattern). # # There are only two supported options: --lt-debug and # --lt-dump-script. There is, deliberately, no --lt-help. # # The first argument to this parsing function should be the # script's $0 value, followed by "$@". lt_option_debug= func_parse_lt_options () { lt_script_arg0=\$0 shift for lt_opt do case \"\$lt_opt\" in --lt-debug) lt_option_debug=1 ;; --lt-dump-script) lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` cat \"\$lt_dump_D/\$lt_dump_F\" exit 0 ;; --lt-*) \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 exit 1 ;; esac done # Print the debug banner immediately: if test -n \"\$lt_option_debug\"; then echo \"${outputname}:${output}:\${LINENO}: libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\" 1>&2 fi } # Used when --lt-debug. 
Prints its arguments to stdout # (redirection is the responsibility of the caller) func_lt_dump_args () { lt_dump_args_N=1; for lt_arg do \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[\$lt_dump_args_N]: \$lt_arg\" lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` done } # Core function for launching the target application func_exec_program_core () { " case $host in # Backslashes separate directories on plain windows *-*-mingw | *-*-os2* | *-cegcc*) $ECHO "\ if test -n \"\$lt_option_debug\"; then \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir\\\\\$program\" 1>&2 func_lt_dump_args \${1+\"\$@\"} 1>&2 fi exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} " ;; *) $ECHO "\ if test -n \"\$lt_option_debug\"; then \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir/\$program\" 1>&2 func_lt_dump_args \${1+\"\$@\"} 1>&2 fi exec \"\$progdir/\$program\" \${1+\"\$@\"} " ;; esac $ECHO "\ \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 exit 1 } # A function to encapsulate launching the target application # Strips options in the --lt-* namespace from \$@ and # launches target application with the remaining arguments. func_exec_program () { case \" \$* \" in *\\ --lt-*) for lt_wr_arg do case \$lt_wr_arg in --lt-*) ;; *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; esac shift done ;; esac func_exec_program_core \${1+\"\$@\"} } # Parse options func_parse_lt_options \"\$0\" \${1+\"\$@\"} # Find the directory that this script lives in. thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` test \"x\$thisdir\" = \"x\$file\" && thisdir=. # Follow symbolic links until we get to the real thisdir. file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` while test -n \"\$file\"; do destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` # If there was a directory component, then change thisdir. 
if test \"x\$destdir\" != \"x\$file\"; then case \"\$destdir\" in [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; *) thisdir=\"\$thisdir/\$destdir\" ;; esac fi file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` done # Usually 'no', except on cygwin/mingw when embedded into # the cwrapper. WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then # special case for '.' if test \"\$thisdir\" = \".\"; then thisdir=\`pwd\` fi # remove .libs from thisdir case \"\$thisdir\" in *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; $objdir ) thisdir=. ;; esac fi # Try to get the absolute directory name. absdir=\`cd \"\$thisdir\" && pwd\` test -n \"\$absdir\" && thisdir=\"\$absdir\" " if test "$fast_install" = yes; then $ECHO "\ program=lt-'$outputname'$exeext progdir=\"\$thisdir/$objdir\" if test ! -f \"\$progdir/\$program\" || { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ test \"X\$file\" != \"X\$progdir/\$program\"; }; then file=\"\$\$-\$program\" if test ! -d \"\$progdir\"; then $MKDIR \"\$progdir\" else $RM \"\$progdir/\$file\" fi" $ECHO "\ # relink executable if necessary if test -n \"\$relink_command\"; then if relink_command_output=\`eval \$relink_command 2>&1\`; then : else $ECHO \"\$relink_command_output\" >&2 $RM \"\$progdir/\$file\" exit 1 fi fi $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || { $RM \"\$progdir/\$program\"; $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } $RM \"\$progdir/\$file\" fi" else $ECHO "\ program='$outputname' progdir=\"\$thisdir/$objdir\" " fi $ECHO "\ if test -f \"\$progdir/\$program\"; then" # fixup the dll searchpath if we need to. # # Fix the DLL searchpath if we need to. Do this before prepending # to shlibpath, because on Windows, both are PATH and uninstalled # libraries must come first. 
if test -n "$dllsearchpath"; then $ECHO "\ # Add the dll search path components to the executable PATH PATH=$dllsearchpath:\$PATH " fi # Export our shlibpath_var if we have one. if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then $ECHO "\ # Add our own library path to $shlibpath_var $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" # Some systems cannot cope with colon-terminated $shlibpath_var # The second colon is a workaround for a bug in BeOS R4 sed $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` export $shlibpath_var " fi $ECHO "\ if test \"\$libtool_execute_magic\" != \"$magic\"; then # Run the actual program with our arguments. func_exec_program \${1+\"\$@\"} fi else # The program doesn't exist. \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 exit 1 fi fi\ " } # func_emit_cwrapperexe_src # emit the source code for a wrapper executable on stdout # Must ONLY be called from within func_mode_link because # it depends on a number of variable set therein. func_emit_cwrapperexe_src () { cat < #include #ifdef _MSC_VER # include # include # include #else # include # include # ifdef __CYGWIN__ # include # endif #endif #include #include #include #include #include #include #include #include /* declarations of non-ANSI functions */ #if defined(__MINGW32__) # ifdef __STRICT_ANSI__ int _putenv (const char *); # endif #elif defined(__CYGWIN__) # ifdef __STRICT_ANSI__ char *realpath (const char *, char *); int putenv (char *); int setenv (const char *, const char *, int); # endif /* #elif defined (other platforms) ... 
*/ #endif /* portability defines, excluding path handling macros */ #if defined(_MSC_VER) # define setmode _setmode # define stat _stat # define chmod _chmod # define getcwd _getcwd # define putenv _putenv # define S_IXUSR _S_IEXEC # ifndef _INTPTR_T_DEFINED # define _INTPTR_T_DEFINED # define intptr_t int # endif #elif defined(__MINGW32__) # define setmode _setmode # define stat _stat # define chmod _chmod # define getcwd _getcwd # define putenv _putenv #elif defined(__CYGWIN__) # define HAVE_SETENV # define FOPEN_WB "wb" /* #elif defined (other platforms) ... */ #endif #if defined(PATH_MAX) # define LT_PATHMAX PATH_MAX #elif defined(MAXPATHLEN) # define LT_PATHMAX MAXPATHLEN #else # define LT_PATHMAX 1024 #endif #ifndef S_IXOTH # define S_IXOTH 0 #endif #ifndef S_IXGRP # define S_IXGRP 0 #endif /* path handling portability macros */ #ifndef DIR_SEPARATOR # define DIR_SEPARATOR '/' # define PATH_SEPARATOR ':' #endif #if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ defined (__OS2__) # define HAVE_DOS_BASED_FILE_SYSTEM # define FOPEN_WB "wb" # ifndef DIR_SEPARATOR_2 # define DIR_SEPARATOR_2 '\\' # endif # ifndef PATH_SEPARATOR_2 # define PATH_SEPARATOR_2 ';' # endif #endif #ifndef DIR_SEPARATOR_2 # define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) #else /* DIR_SEPARATOR_2 */ # define IS_DIR_SEPARATOR(ch) \ (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) #endif /* DIR_SEPARATOR_2 */ #ifndef PATH_SEPARATOR_2 # define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) #else /* PATH_SEPARATOR_2 */ # define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) #endif /* PATH_SEPARATOR_2 */ #ifndef FOPEN_WB # define FOPEN_WB "w" #endif #ifndef _O_BINARY # define _O_BINARY 0 #endif #define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) #define XFREE(stale) do { \ if (stale) { free ((void *) stale); stale = 0; } \ } while (0) #if defined(LT_DEBUGWRAPPER) static int lt_debug = 1; #else static int lt_debug = 0; #endif const char 
*program_name = "libtool-wrapper"; /* in case xstrdup fails */ void *xmalloc (size_t num); char *xstrdup (const char *string); const char *base_name (const char *name); char *find_executable (const char *wrapper); char *chase_symlinks (const char *pathspec); int make_executable (const char *path); int check_executable (const char *path); char *strendzap (char *str, const char *pat); void lt_debugprintf (const char *file, int line, const char *fmt, ...); void lt_fatal (const char *file, int line, const char *message, ...); static const char *nonnull (const char *s); static const char *nonempty (const char *s); void lt_setenv (const char *name, const char *value); char *lt_extend_str (const char *orig_value, const char *add, int to_end); void lt_update_exe_path (const char *name, const char *value); void lt_update_lib_path (const char *name, const char *value); char **prepare_spawn (char **argv); void lt_dump_script (FILE *f); EOF cat <= 0) && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) return 1; else return 0; } int make_executable (const char *path) { int rval = 0; struct stat st; lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", nonempty (path)); if ((!path) || (!*path)) return 0; if (stat (path, &st) >= 0) { rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); } return rval; } /* Searches for the full path of the wrapper. Returns newly allocated full path name if found, NULL otherwise Does not chase symlinks, even on platforms that support them. */ char * find_executable (const char *wrapper) { int has_slash = 0; const char *p; const char *p_next; /* static buffer for getcwd */ char tmp[LT_PATHMAX + 1]; int tmp_len; char *concat_name; lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", nonempty (wrapper)); if ((wrapper == NULL) || (*wrapper == '\0')) return NULL; /* Absolute path? 
*/ #if defined (HAVE_DOS_BASED_FILE_SYSTEM) if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') { concat_name = xstrdup (wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } else { #endif if (IS_DIR_SEPARATOR (wrapper[0])) { concat_name = xstrdup (wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } #if defined (HAVE_DOS_BASED_FILE_SYSTEM) } #endif for (p = wrapper; *p; p++) if (*p == '/') { has_slash = 1; break; } if (!has_slash) { /* no slashes; search PATH */ const char *path = getenv ("PATH"); if (path != NULL) { for (p = path; *p; p = p_next) { const char *q; size_t p_len; for (q = p; *q; q++) if (IS_PATH_SEPARATOR (*q)) break; p_len = q - p; p_next = (*q == '\0' ? q : q + 1); if (p_len == 0) { /* empty path: current directory */ if (getcwd (tmp, LT_PATHMAX) == NULL) lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", nonnull (strerror (errno))); tmp_len = strlen (tmp); concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, tmp, tmp_len); concat_name[tmp_len] = '/'; strcpy (concat_name + tmp_len + 1, wrapper); } else { concat_name = XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, p, p_len); concat_name[p_len] = '/'; strcpy (concat_name + p_len + 1, wrapper); } if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } } /* not found in PATH; assume curdir */ } /* Relative path | not found in path: prepend cwd */ if (getcwd (tmp, LT_PATHMAX) == NULL) lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", nonnull (strerror (errno))); tmp_len = strlen (tmp); concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, tmp, tmp_len); concat_name[tmp_len] = '/'; strcpy (concat_name + tmp_len + 1, wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); return NULL; } char * chase_symlinks (const char *pathspec) { #ifndef S_ISLNK return xstrdup (pathspec); 
#else char buf[LT_PATHMAX]; struct stat s; char *tmp_pathspec = xstrdup (pathspec); char *p; int has_symlinks = 0; while (strlen (tmp_pathspec) && !has_symlinks) { lt_debugprintf (__FILE__, __LINE__, "checking path component for symlinks: %s\n", tmp_pathspec); if (lstat (tmp_pathspec, &s) == 0) { if (S_ISLNK (s.st_mode) != 0) { has_symlinks = 1; break; } /* search backwards for last DIR_SEPARATOR */ p = tmp_pathspec + strlen (tmp_pathspec) - 1; while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) p--; if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) { /* no more DIR_SEPARATORS left */ break; } *p = '\0'; } else { lt_fatal (__FILE__, __LINE__, "error accessing file \"%s\": %s", tmp_pathspec, nonnull (strerror (errno))); } } XFREE (tmp_pathspec); if (!has_symlinks) { return xstrdup (pathspec); } tmp_pathspec = realpath (pathspec, buf); if (tmp_pathspec == 0) { lt_fatal (__FILE__, __LINE__, "could not follow symlinks for %s", pathspec); } return xstrdup (tmp_pathspec); #endif } char * strendzap (char *str, const char *pat) { size_t len, patlen; assert (str != NULL); assert (pat != NULL); len = strlen (str); patlen = strlen (pat); if (patlen <= len) { str += len - patlen; if (strcmp (str, pat) == 0) *str = '\0'; } return str; } void lt_debugprintf (const char *file, int line, const char *fmt, ...) { va_list args; if (lt_debug) { (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); va_start (args, fmt); (void) vfprintf (stderr, fmt, args); va_end (args); } } static void lt_error_core (int exit_status, const char *file, int line, const char *mode, const char *message, va_list ap) { fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); vfprintf (stderr, message, ap); fprintf (stderr, ".\n"); if (exit_status >= 0) exit (exit_status); } void lt_fatal (const char *file, int line, const char *message, ...) 
{ va_list ap; va_start (ap, message); lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); va_end (ap); } static const char * nonnull (const char *s) { return s ? s : "(null)"; } static const char * nonempty (const char *s) { return (s && !*s) ? "(empty)" : nonnull (s); } void lt_setenv (const char *name, const char *value) { lt_debugprintf (__FILE__, __LINE__, "(lt_setenv) setting '%s' to '%s'\n", nonnull (name), nonnull (value)); { #ifdef HAVE_SETENV /* always make a copy, for consistency with !HAVE_SETENV */ char *str = xstrdup (value); setenv (name, str, 1); #else int len = strlen (name) + 1 + strlen (value) + 1; char *str = XMALLOC (char, len); sprintf (str, "%s=%s", name, value); if (putenv (str) != EXIT_SUCCESS) { XFREE (str); } #endif } } char * lt_extend_str (const char *orig_value, const char *add, int to_end) { char *new_value; if (orig_value && *orig_value) { int orig_value_len = strlen (orig_value); int add_len = strlen (add); new_value = XMALLOC (char, add_len + orig_value_len + 1); if (to_end) { strcpy (new_value, orig_value); strcpy (new_value + orig_value_len, add); } else { strcpy (new_value, add); strcpy (new_value + add_len, orig_value); } } else { new_value = xstrdup (add); } return new_value; } void lt_update_exe_path (const char *name, const char *value) { lt_debugprintf (__FILE__, __LINE__, "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", nonnull (name), nonnull (value)); if (name && *name && value && *value) { char *new_value = lt_extend_str (getenv (name), value, 0); /* some systems can't cope with a ':'-terminated path #' */ int len = strlen (new_value); while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1])) { new_value[len-1] = '\0'; } lt_setenv (name, new_value); XFREE (new_value); } } void lt_update_lib_path (const char *name, const char *value) { lt_debugprintf (__FILE__, __LINE__, "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", nonnull (name), nonnull (value)); if (name && 
*name && value && *value) { char *new_value = lt_extend_str (getenv (name), value, 0); lt_setenv (name, new_value); XFREE (new_value); } } EOF case $host_os in mingw*) cat <<"EOF" /* Prepares an argument vector before calling spawn(). Note that spawn() does not by itself call the command interpreter (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") : ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); GetVersionEx(&v); v.dwPlatformId == VER_PLATFORM_WIN32_NT; }) ? "cmd.exe" : "command.com"). Instead it simply concatenates the arguments, separated by ' ', and calls CreateProcess(). We must quote the arguments since Win32 CreateProcess() interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a special way: - Space and tab are interpreted as delimiters. They are not treated as delimiters if they are surrounded by double quotes: "...". - Unescaped double quotes are removed from the input. Their only effect is that within double quotes, space and tab are treated like normal characters. - Backslashes not followed by double quotes are not special. - But 2*n+1 backslashes followed by a double quote become n backslashes followed by a double quote (n >= 0): \" -> " \\\" -> \" \\\\\" -> \\" */ #define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" #define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" char ** prepare_spawn (char **argv) { size_t argc; char **new_argv; size_t i; /* Count number of arguments. */ for (argc = 0; argv[argc] != NULL; argc++) ; /* Allocate new argument vector. */ new_argv = XMALLOC (char *, argc + 1); /* Put quoted arguments into the new argument vector. 
*/ for (i = 0; i < argc; i++) { const char *string = argv[i]; if (string[0] == '\0') new_argv[i] = xstrdup ("\"\""); else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) { int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); size_t length; unsigned int backslashes; const char *s; char *quoted_string; char *p; length = 0; backslashes = 0; if (quote_around) length++; for (s = string; *s != '\0'; s++) { char c = *s; if (c == '"') length += backslashes + 1; length++; if (c == '\\') backslashes++; else backslashes = 0; } if (quote_around) length += backslashes + 1; quoted_string = XMALLOC (char, length + 1); p = quoted_string; backslashes = 0; if (quote_around) *p++ = '"'; for (s = string; *s != '\0'; s++) { char c = *s; if (c == '"') { unsigned int j; for (j = backslashes + 1; j > 0; j--) *p++ = '\\'; } *p++ = c; if (c == '\\') backslashes++; else backslashes = 0; } if (quote_around) { unsigned int j; for (j = backslashes; j > 0; j--) *p++ = '\\'; *p++ = '"'; } *p = '\0'; new_argv[i] = quoted_string; } else new_argv[i] = (char *) string; } new_argv[argc] = NULL; return new_argv; } EOF ;; esac cat <<"EOF" void lt_dump_script (FILE* f) { EOF func_emit_wrapper yes | $SED -n -e ' s/^\(.\{79\}\)\(..*\)/\1\ \2/ h s/\([\\"]\)/\\\1/g s/$/\\n/ s/\([^\n]*\).*/ fputs ("\1", f);/p g D' cat <<"EOF" } EOF } # end: func_emit_cwrapperexe_src # func_win32_import_lib_p ARG # True if ARG is an import lib, as indicated by $file_magic_cmd func_win32_import_lib_p () { $opt_debug case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in *import*) : ;; *) false ;; esac } # func_mode_link arg... func_mode_link () { $opt_debug case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) # It is impossible to link a dll without this setting, and # we shouldn't force the makefile maintainer to figure out # which system we are compiling for in order to pass an extra # flag for every libtool invocation. 
# allow_undefined=no # FIXME: Unfortunately, there are problems with the above when trying # to make a dll which has undefined symbols, in which case not # even a static library is built. For now, we need to specify # -no-undefined on the libtool link line when we can be certain # that all symbols are satisfied, otherwise we get a static library. allow_undefined=yes ;; *) allow_undefined=yes ;; esac libtool_args=$nonopt base_compile="$nonopt $@" compile_command=$nonopt finalize_command=$nonopt compile_rpath= finalize_rpath= compile_shlibpath= finalize_shlibpath= convenience= old_convenience= deplibs= old_deplibs= compiler_flags= linker_flags= dllsearchpath= lib_search_path=`pwd` inst_prefix_dir= new_inherited_linker_flags= avoid_version=no bindir= dlfiles= dlprefiles= dlself=no export_dynamic=no export_symbols= export_symbols_regex= generated= libobjs= ltlibs= module=no no_install=no objs= non_pic_objects= precious_files_regex= prefer_static_libs=no preload=no prev= prevarg= release= rpath= xrpath= perm_rpath= temp_rpath= thread_safe=no vinfo= vinfo_number=no weak_libs= single_module="${wl}-single_module" func_infer_tag $base_compile # We need to know -static, to get the right output filenames. 
for arg do case $arg in -shared) test "$build_libtool_libs" != yes && \ func_fatal_configuration "can not build a shared library" build_old_libs=no break ;; -all-static | -static | -static-libtool-libs) case $arg in -all-static) if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then func_warning "complete static linking is impossible in this configuration" fi if test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=yes ;; -static) if test -z "$pic_flag" && test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=built ;; -static-libtool-libs) if test -z "$pic_flag" && test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=yes ;; esac build_libtool_libs=no build_old_libs=yes break ;; esac done # See if our shared archives depend on static archives. test -n "$old_archive_from_new_cmds" && build_old_libs=yes # Go through the arguments, transforming them on the way. while test "$#" -gt 0; do arg="$1" shift func_quote_for_eval "$arg" qarg=$func_quote_for_eval_unquoted_result func_append libtool_args " $func_quote_for_eval_result" # If the previous option needs an argument, assign it. if test -n "$prev"; then case $prev in output) func_append compile_command " @OUTPUT@" func_append finalize_command " @OUTPUT@" ;; esac case $prev in bindir) bindir="$arg" prev= continue ;; dlfiles|dlprefiles) if test "$preload" = no; then # Add the symbol object into the linking commands. func_append compile_command " @SYMFILE@" func_append finalize_command " @SYMFILE@" preload=yes fi case $arg in *.la | *.lo) ;; # We handle these cases below. 
force) if test "$dlself" = no; then dlself=needless export_dynamic=yes fi prev= continue ;; self) if test "$prev" = dlprefiles; then dlself=yes elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then dlself=yes else dlself=needless export_dynamic=yes fi prev= continue ;; *) if test "$prev" = dlfiles; then func_append dlfiles " $arg" else func_append dlprefiles " $arg" fi prev= continue ;; esac ;; expsyms) export_symbols="$arg" test -f "$arg" \ || func_fatal_error "symbol file \`$arg' does not exist" prev= continue ;; expsyms_regex) export_symbols_regex="$arg" prev= continue ;; framework) case $host in *-*-darwin*) case "$deplibs " in *" $qarg.ltframework "*) ;; *) func_append deplibs " $qarg.ltframework" # this is fixed later ;; esac ;; esac prev= continue ;; inst_prefix) inst_prefix_dir="$arg" prev= continue ;; objectlist) if test -f "$arg"; then save_arg=$arg moreargs= for fil in `cat "$save_arg"` do # func_append moreargs " $fil" arg=$fil # A libtool-controlled object. # Check to see that this really is a libtool object. if func_lalib_unsafe_p "$arg"; then pic_object= non_pic_object= # Read the .lo file func_source "$arg" if test -z "$pic_object" || test -z "$non_pic_object" || test "$pic_object" = none && test "$non_pic_object" = none; then func_fatal_error "cannot find name of object for \`$arg'" fi # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" if test "$pic_object" != none; then # Prepend the subdirectory the object is found in. pic_object="$xdir$pic_object" if test "$prev" = dlfiles; then if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then func_append dlfiles " $pic_object" prev= continue else # If libtool objects are unsupported, then we need to preload. prev=dlprefiles fi fi # CHECK ME: I think I busted this. -Ossama if test "$prev" = dlprefiles; then # Preload the old-style object. func_append dlprefiles " $pic_object" prev= fi # A PIC object. 
func_append libobjs " $pic_object" arg="$pic_object" fi # Non-PIC object. if test "$non_pic_object" != none; then # Prepend the subdirectory the object is found in. non_pic_object="$xdir$non_pic_object" # A standard non-PIC object func_append non_pic_objects " $non_pic_object" if test -z "$pic_object" || test "$pic_object" = none ; then arg="$non_pic_object" fi else # If the PIC object exists, use it instead. # $xdir was prepended to $pic_object above. non_pic_object="$pic_object" func_append non_pic_objects " $non_pic_object" fi else # Only an error if not doing a dry-run. if $opt_dry_run; then # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" func_lo2o "$arg" pic_object=$xdir$objdir/$func_lo2o_result non_pic_object=$xdir$func_lo2o_result func_append libobjs " $pic_object" func_append non_pic_objects " $non_pic_object" else func_fatal_error "\`$arg' is not a valid libtool object" fi fi done else func_fatal_error "link input file \`$arg' does not exist" fi arg=$save_arg prev= continue ;; precious_regex) precious_files_regex="$arg" prev= continue ;; release) release="-$arg" prev= continue ;; rpath | xrpath) # We need an absolute path. 
case $arg in [\\/]* | [A-Za-z]:[\\/]*) ;; *) func_fatal_error "only absolute run-paths are allowed" ;; esac if test "$prev" = rpath; then case "$rpath " in *" $arg "*) ;; *) func_append rpath " $arg" ;; esac else case "$xrpath " in *" $arg "*) ;; *) func_append xrpath " $arg" ;; esac fi prev= continue ;; shrext) shrext_cmds="$arg" prev= continue ;; weak) func_append weak_libs " $arg" prev= continue ;; xcclinker) func_append linker_flags " $qarg" func_append compiler_flags " $qarg" prev= func_append compile_command " $qarg" func_append finalize_command " $qarg" continue ;; xcompiler) func_append compiler_flags " $qarg" prev= func_append compile_command " $qarg" func_append finalize_command " $qarg" continue ;; xlinker) func_append linker_flags " $qarg" func_append compiler_flags " $wl$qarg" prev= func_append compile_command " $wl$qarg" func_append finalize_command " $wl$qarg" continue ;; *) eval "$prev=\"\$arg\"" prev= continue ;; esac fi # test -n "$prev" prevarg="$arg" case $arg in -all-static) if test -n "$link_static_flag"; then # See comment for -static flag below, for more details. func_append compile_command " $link_static_flag" func_append finalize_command " $link_static_flag" fi continue ;; -allow-undefined) # FIXME: remove this flag sometime in the future. 
func_fatal_error "\`-allow-undefined' must not be used because it is the default" ;; -avoid-version) avoid_version=yes continue ;; -bindir) prev=bindir continue ;; -dlopen) prev=dlfiles continue ;; -dlpreopen) prev=dlprefiles continue ;; -export-dynamic) export_dynamic=yes continue ;; -export-symbols | -export-symbols-regex) if test -n "$export_symbols" || test -n "$export_symbols_regex"; then func_fatal_error "more than one -exported-symbols argument is not allowed" fi if test "X$arg" = "X-export-symbols"; then prev=expsyms else prev=expsyms_regex fi continue ;; -framework) prev=framework continue ;; -inst-prefix-dir) prev=inst_prefix continue ;; # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* # so, if we see these flags be careful not to treat them like -L -L[A-Z][A-Z]*:*) case $with_gcc/$host in no/*-*-irix* | /*-*-irix*) func_append compile_command " $arg" func_append finalize_command " $arg" ;; esac continue ;; -L*) func_stripname "-L" '' "$arg" if test -z "$func_stripname_result"; then if test "$#" -gt 0; then func_fatal_error "require no space between \`-L' and \`$1'" else func_fatal_error "need path for \`-L' option" fi fi func_resolve_sysroot "$func_stripname_result" dir=$func_resolve_sysroot_result # We need an absolute path. 
case $dir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) absdir=`cd "$dir" && pwd` test -z "$absdir" && \ func_fatal_error "cannot determine absolute directory name of \`$dir'" dir="$absdir" ;; esac case "$deplibs " in *" -L$dir "* | *" $arg "*) # Will only happen for absolute or sysroot arguments ;; *) # Preserve sysroot, but never include relative directories case $dir in [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; *) func_append deplibs " -L$dir" ;; esac func_append lib_search_path " $dir" ;; esac case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` case :$dllsearchpath: in *":$dir:"*) ;; ::) dllsearchpath=$dir;; *) func_append dllsearchpath ":$dir";; esac case :$dllsearchpath: in *":$testbindir:"*) ;; ::) dllsearchpath=$testbindir;; *) func_append dllsearchpath ":$testbindir";; esac ;; esac continue ;; -l*) if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) # These systems don't actually have a C or math library (as such) continue ;; *-*-os2*) # These systems don't actually have a C library (as such) test "X$arg" = "X-lc" && continue ;; *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc due to us having libc/libc_r. test "X$arg" = "X-lc" && continue ;; *-*-rhapsody* | *-*-darwin1.[012]) # Rhapsody C and math libraries are in the System framework func_append deplibs " System.ltframework" continue ;; *-*-sco3.2v5* | *-*-sco5v6*) # Causes problems with __ctype test "X$arg" = "X-lc" && continue ;; *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) # Compiler inserts libc in the correct place for threads to work test "X$arg" = "X-lc" && continue ;; esac elif test "X$arg" = "X-lc_r"; then case $host in *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc_r directly, use -pthread flag. 
continue ;; esac fi func_append deplibs " $arg" continue ;; -module) module=yes continue ;; # Tru64 UNIX uses -model [arg] to determine the layout of C++ # classes, name mangling, and exception handling. # Darwin uses the -arch flag to determine output architecture. -model|-arch|-isysroot|--sysroot) func_append compiler_flags " $arg" func_append compile_command " $arg" func_append finalize_command " $arg" prev=xcompiler continue ;; -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) func_append compiler_flags " $arg" func_append compile_command " $arg" func_append finalize_command " $arg" case "$new_inherited_linker_flags " in *" $arg "*) ;; * ) func_append new_inherited_linker_flags " $arg" ;; esac continue ;; -multi_module) single_module="${wl}-multi_module" continue ;; -no-fast-install) fast_install=no continue ;; -no-install) case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) # The PATH hackery in wrapper scripts is required on Windows # and Darwin in order for the loader to find any dlls it needs. func_warning "\`-no-install' is ignored for $host" func_warning "assuming \`-no-fast-install' instead" fast_install=no ;; *) no_install=yes ;; esac continue ;; -no-undefined) allow_undefined=no continue ;; -objectlist) prev=objectlist continue ;; -o) prev=output ;; -precious-files-regex) prev=precious_regex continue ;; -release) prev=release continue ;; -rpath) prev=rpath continue ;; -R) prev=xrpath continue ;; -R*) func_stripname '-R' '' "$arg" dir=$func_stripname_result # We need an absolute path. case $dir in [\\/]* | [A-Za-z]:[\\/]*) ;; =*) func_stripname '=' '' "$dir" dir=$lt_sysroot$func_stripname_result ;; *) func_fatal_error "only absolute run-paths are allowed" ;; esac case "$xrpath " in *" $dir "*) ;; *) func_append xrpath " $dir" ;; esac continue ;; -shared) # The effects of -shared are defined in a previous loop. 
continue ;; -shrext) prev=shrext continue ;; -static | -static-libtool-libs) # The effects of -static are defined in a previous loop. # We used to do the same as -all-static on platforms that # didn't have a PIC flag, but the assumption that the effects # would be equivalent was wrong. It would break on at least # Digital Unix and AIX. continue ;; -thread-safe) thread_safe=yes continue ;; -version-info) prev=vinfo continue ;; -version-number) prev=vinfo vinfo_number=yes continue ;; -weak) prev=weak continue ;; -Wc,*) func_stripname '-Wc,' '' "$arg" args=$func_stripname_result arg= save_ifs="$IFS"; IFS=',' for flag in $args; do IFS="$save_ifs" func_quote_for_eval "$flag" func_append arg " $func_quote_for_eval_result" func_append compiler_flags " $func_quote_for_eval_result" done IFS="$save_ifs" func_stripname ' ' '' "$arg" arg=$func_stripname_result ;; -Wl,*) func_stripname '-Wl,' '' "$arg" args=$func_stripname_result arg= save_ifs="$IFS"; IFS=',' for flag in $args; do IFS="$save_ifs" func_quote_for_eval "$flag" func_append arg " $wl$func_quote_for_eval_result" func_append compiler_flags " $wl$func_quote_for_eval_result" func_append linker_flags " $func_quote_for_eval_result" done IFS="$save_ifs" func_stripname ' ' '' "$arg" arg=$func_stripname_result ;; -Xcompiler) prev=xcompiler continue ;; -Xlinker) prev=xlinker continue ;; -XCClinker) prev=xcclinker continue ;; # -msg_* for osf cc -msg_*) func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" ;; # Flags to be passed through unchanged, with rationale: # -64, -mips[0-9] enable 64-bit mode for the SGI compiler # -r[0-9][0-9]* specify processor for the SGI compiler # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler # +DA*, +DD* enable 64-bit mode for the HP compiler # -q* compiler args for the IBM compiler # -m*, -t[45]*, -txscale* architecture-specific flags for GCC # -F/path path to uninstalled frameworks, gcc on darwin # -p, -pg, --coverage, -fprofile-* profiling flags for GCC # @file GCC 
response files # -tp=* Portland pgcc target processor selection # --sysroot=* for sysroot support # -O*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ -O*|-flto*|-fwhopr*|-fuse-linker-plugin) func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" func_append compile_command " $arg" func_append finalize_command " $arg" func_append compiler_flags " $arg" continue ;; # Some other compiler flag. -* | +*) func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" ;; *.$objext) # A standard object. func_append objs " $arg" ;; *.lo) # A libtool-controlled object. # Check to see that this really is a libtool object. if func_lalib_unsafe_p "$arg"; then pic_object= non_pic_object= # Read the .lo file func_source "$arg" if test -z "$pic_object" || test -z "$non_pic_object" || test "$pic_object" = none && test "$non_pic_object" = none; then func_fatal_error "cannot find name of object for \`$arg'" fi # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" if test "$pic_object" != none; then # Prepend the subdirectory the object is found in. pic_object="$xdir$pic_object" if test "$prev" = dlfiles; then if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then func_append dlfiles " $pic_object" prev= continue else # If libtool objects are unsupported, then we need to preload. prev=dlprefiles fi fi # CHECK ME: I think I busted this. -Ossama if test "$prev" = dlprefiles; then # Preload the old-style object. func_append dlprefiles " $pic_object" prev= fi # A PIC object. func_append libobjs " $pic_object" arg="$pic_object" fi # Non-PIC object. if test "$non_pic_object" != none; then # Prepend the subdirectory the object is found in. 
non_pic_object="$xdir$non_pic_object" # A standard non-PIC object func_append non_pic_objects " $non_pic_object" if test -z "$pic_object" || test "$pic_object" = none ; then arg="$non_pic_object" fi else # If the PIC object exists, use it instead. # $xdir was prepended to $pic_object above. non_pic_object="$pic_object" func_append non_pic_objects " $non_pic_object" fi else # Only an error if not doing a dry-run. if $opt_dry_run; then # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir="$func_dirname_result" func_lo2o "$arg" pic_object=$xdir$objdir/$func_lo2o_result non_pic_object=$xdir$func_lo2o_result func_append libobjs " $pic_object" func_append non_pic_objects " $non_pic_object" else func_fatal_error "\`$arg' is not a valid libtool object" fi fi ;; *.$libext) # An archive. func_append deplibs " $arg" func_append old_deplibs " $arg" continue ;; *.la) # A libtool-controlled library. func_resolve_sysroot "$arg" if test "$prev" = dlfiles; then # This library was specified with -dlopen. func_append dlfiles " $func_resolve_sysroot_result" prev= elif test "$prev" = dlprefiles; then # The library was specified with -dlpreopen. func_append dlprefiles " $func_resolve_sysroot_result" prev= else func_append deplibs " $func_resolve_sysroot_result" fi continue ;; # Some other compiler argument. *) # Unknown arguments in both finalize_command and compile_command need # to be aesthetically quoted because they are evaled later. func_quote_for_eval "$arg" arg="$func_quote_for_eval_result" ;; esac # arg # Now actually substitute the argument into the commands. 
if test -n "$arg"; then func_append compile_command " $arg" func_append finalize_command " $arg" fi done # argument parsing loop test -n "$prev" && \ func_fatal_help "the \`$prevarg' option requires an argument" if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then eval arg=\"$export_dynamic_flag_spec\" func_append compile_command " $arg" func_append finalize_command " $arg" fi oldlibs= # calculate the name of the file, without its directory func_basename "$output" outputname="$func_basename_result" libobjs_save="$libobjs" if test -n "$shlibpath_var"; then # get the directories listed in $shlibpath_var eval shlib_search_path=\`\$ECHO \"\${$shlibpath_var}\" \| \$SED \'s/:/ /g\'\` else shlib_search_path= fi eval sys_lib_search_path=\"$sys_lib_search_path_spec\" eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" func_dirname "$output" "/" "" output_objdir="$func_dirname_result$objdir" func_to_tool_file "$output_objdir/" tool_output_objdir=$func_to_tool_file_result # Create the object directory. func_mkdir_p "$output_objdir" # Determine the type of output case $output in "") func_fatal_help "you must specify an output file" ;; *.$libext) linkmode=oldlib ;; *.lo | *.$objext) linkmode=obj ;; *.la) linkmode=lib ;; *) linkmode=prog ;; # Anything else should be a program. esac specialdeplibs= libs= # Find all interdependent deplibs by searching for libraries # that are linked more than once (e.g. -la -lb -la) for deplib in $deplibs; do if $opt_preserve_dup_deps ; then case "$libs " in *" $deplib "*) func_append specialdeplibs " $deplib" ;; esac fi func_append libs " $deplib" done if test "$linkmode" = lib; then libs="$predeps $libs $compiler_lib_search_path $postdeps" # Compute libraries that are listed more than once in $predeps # $postdeps and mark them as special (i.e., whose duplicates are # not to be eliminated). 
pre_post_deps= if $opt_duplicate_compiler_generated_deps; then for pre_post_dep in $predeps $postdeps; do case "$pre_post_deps " in *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; esac func_append pre_post_deps " $pre_post_dep" done fi pre_post_deps= fi deplibs= newdependency_libs= newlib_search_path= need_relink=no # whether we're linking any uninstalled libtool libraries notinst_deplibs= # not-installed libtool libraries notinst_path= # paths that contain not-installed libtool libraries case $linkmode in lib) passes="conv dlpreopen link" for file in $dlfiles $dlprefiles; do case $file in *.la) ;; *) func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file" ;; esac done ;; prog) compile_deplibs= finalize_deplibs= alldeplibs=no newdlfiles= newdlprefiles= passes="conv scan dlopen dlpreopen link" ;; *) passes="conv" ;; esac for pass in $passes; do # The preopen pass in lib mode reverses $deplibs; put it back here # so that -L comes before libs that need it for instance... 
if test "$linkmode,$pass" = "lib,link"; then ## FIXME: Find the place where the list is rebuilt in the wrong ## order, and fix it there properly tmp_deplibs= for deplib in $deplibs; do tmp_deplibs="$deplib $tmp_deplibs" done deplibs="$tmp_deplibs" fi if test "$linkmode,$pass" = "lib,link" || test "$linkmode,$pass" = "prog,scan"; then libs="$deplibs" deplibs= fi if test "$linkmode" = prog; then case $pass in dlopen) libs="$dlfiles" ;; dlpreopen) libs="$dlprefiles" ;; link) libs="$deplibs %DEPLIBS% $dependency_libs" ;; esac fi if test "$linkmode,$pass" = "lib,dlpreopen"; then # Collect and forward deplibs of preopened libtool libs for lib in $dlprefiles; do # Ignore non-libtool-libs dependency_libs= func_resolve_sysroot "$lib" case $lib in *.la) func_source "$func_resolve_sysroot_result" ;; esac # Collect preopened libtool deplibs, except any this library # has declared as weak libs for deplib in $dependency_libs; do func_basename "$deplib" deplib_base=$func_basename_result case " $weak_libs " in *" $deplib_base "*) ;; *) func_append deplibs " $deplib" ;; esac done done libs="$dlprefiles" fi if test "$pass" = dlopen; then # Collect dlpreopened libraries save_deplibs="$deplibs" deplibs= fi for deplib in $libs; do lib= found=no case $deplib in -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else func_append compiler_flags " $deplib" if test "$linkmode" = lib ; then case "$new_inherited_linker_flags " in *" $deplib "*) ;; * ) func_append new_inherited_linker_flags " $deplib" ;; esac fi fi continue ;; -l*) if test "$linkmode" != lib && test "$linkmode" != prog; then func_warning "\`-l' is ignored for archives/objects" continue fi func_stripname '-l' '' "$deplib" name=$func_stripname_result if test "$linkmode" = lib; then searchdirs="$newlib_search_path 
$lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" else searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" fi for searchdir in $searchdirs; do for search_ext in .la $std_shrext .so .a; do # Search the libtool library lib="$searchdir/lib${name}${search_ext}" if test -f "$lib"; then if test "$search_ext" = ".la"; then found=yes else found=no fi break 2 fi done done if test "$found" != yes; then # deplib doesn't seem to be a libtool library if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else deplibs="$deplib $deplibs" test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" fi continue else # deplib is a libtool library # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, # We need to do some special things here, and not later. if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then case " $predeps $postdeps " in *" $deplib "*) if func_lalib_p "$lib"; then library_names= old_library= func_source "$lib" for l in $old_library $library_names; do ll="$l" done if test "X$ll" = "X$old_library" ; then # only static version available found=no func_dirname "$lib" "" "." 
ladir="$func_dirname_result" lib=$ladir/$old_library if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else deplibs="$deplib $deplibs" test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" fi continue fi fi ;; *) ;; esac fi fi ;; # -l *.ltframework) if test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else deplibs="$deplib $deplibs" if test "$linkmode" = lib ; then case "$new_inherited_linker_flags " in *" $deplib "*) ;; * ) func_append new_inherited_linker_flags " $deplib" ;; esac fi fi continue ;; -L*) case $linkmode in lib) deplibs="$deplib $deplibs" test "$pass" = conv && continue newdependency_libs="$deplib $newdependency_libs" func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result" func_append newlib_search_path " $func_resolve_sysroot_result" ;; prog) if test "$pass" = conv; then deplibs="$deplib $deplibs" continue fi if test "$pass" = scan; then deplibs="$deplib $deplibs" else compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" fi func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result" func_append newlib_search_path " $func_resolve_sysroot_result" ;; *) func_warning "\`-L' is ignored for archives/objects" ;; esac # linkmode continue ;; # -L -R*) if test "$pass" = link; then func_stripname '-R' '' "$deplib" func_resolve_sysroot "$func_stripname_result" dir=$func_resolve_sysroot_result # Make sure the xrpath contains only unique directories. 
case "$xrpath " in *" $dir "*) ;; *) func_append xrpath " $dir" ;; esac fi deplibs="$deplib $deplibs" continue ;; *.la) func_resolve_sysroot "$deplib" lib=$func_resolve_sysroot_result ;; *.$libext) if test "$pass" = conv; then deplibs="$deplib $deplibs" continue fi case $linkmode in lib) # Linking convenience modules into shared libraries is allowed, # but linking other static libraries is non-portable. case " $dlpreconveniencelibs " in *" $deplib "*) ;; *) valid_a_lib=no case $deplibs_check_method in match_pattern*) set dummy $deplibs_check_method; shift match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ | $EGREP "$match_pattern_regex" > /dev/null; then valid_a_lib=yes fi ;; pass_all) valid_a_lib=yes ;; esac if test "$valid_a_lib" != yes; then echo $ECHO "*** Warning: Trying to link with static lib archive $deplib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have" echo "*** because the file extensions .$libext of this argument makes me believe" echo "*** that it is just a static archive that I should not use here." else echo $ECHO "*** Warning: Linking the shared library $output against the" $ECHO "*** static library $deplib is not portable!" deplibs="$deplib $deplibs" fi ;; esac continue ;; prog) if test "$pass" != link; then deplibs="$deplib $deplibs" else compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" fi continue ;; esac # linkmode ;; # *.$libext *.lo | *.$objext) if test "$pass" = conv; then deplibs="$deplib $deplibs" elif test "$linkmode" = prog; then if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then # If there is no dlopen support or we're linking statically, # we need to preload. 
func_append newdlprefiles " $deplib" compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else func_append newdlfiles " $deplib" fi fi continue ;; %DEPLIBS%) alldeplibs=yes continue ;; esac # case $deplib if test "$found" = yes || test -f "$lib"; then : else func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'" fi # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$lib" \ || func_fatal_error "\`$lib' is not a valid libtool archive" func_dirname "$lib" "" "." ladir="$func_dirname_result" dlname= dlopen= dlpreopen= libdir= library_names= old_library= inherited_linker_flags= # If the library was installed with an old release of libtool, # it will not redefine variables installed, or shouldnotlink installed=yes shouldnotlink=no avoidtemprpath= # Read the .la file func_source "$lib" # Convert "-framework foo" to "foo.ltframework" if test -n "$inherited_linker_flags"; then tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do case " $new_inherited_linker_flags " in *" $tmp_inherited_linker_flag "*) ;; *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; esac done fi dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` if test "$linkmode,$pass" = "lib,link" || test "$linkmode,$pass" = "prog,scan" || { test "$linkmode" != prog && test "$linkmode" != lib; }; then test -n "$dlopen" && func_append dlfiles " $dlopen" test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" fi if test "$pass" = conv; then # Only check for convenience libraries deplibs="$lib $deplibs" if test -z "$libdir"; then if test -z "$old_library"; then func_fatal_error "cannot find name of link library for \`$lib'" fi # It is a libtool convenience library, so add in its objects. 
func_append convenience " $ladir/$objdir/$old_library" func_append old_convenience " $ladir/$objdir/$old_library" elif test "$linkmode" != prog && test "$linkmode" != lib; then func_fatal_error "\`$lib' is not a convenience library" fi tmp_libs= for deplib in $dependency_libs; do deplibs="$deplib $deplibs" if $opt_preserve_dup_deps ; then case "$tmp_libs " in *" $deplib "*) func_append specialdeplibs " $deplib" ;; esac fi func_append tmp_libs " $deplib" done continue fi # $pass = conv # Get the name of the library we link against. linklib= if test -n "$old_library" && { test "$prefer_static_libs" = yes || test "$prefer_static_libs,$installed" = "built,no"; }; then linklib=$old_library else for l in $old_library $library_names; do linklib="$l" done fi if test -z "$linklib"; then func_fatal_error "cannot find name of link library for \`$lib'" fi # This library was specified with -dlopen. if test "$pass" = dlopen; then if test -z "$libdir"; then func_fatal_error "cannot -dlopen a convenience library: \`$lib'" fi if test -z "$dlname" || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then # If there is no dlname, no dlopen support or we're linking # statically, we need to preload. We also need to preload any # dependent libraries so libltdl's deplib preloader doesn't # bomb out in the load deplibs phase. func_append dlprefiles " $lib $dependency_libs" else func_append newdlfiles " $lib" fi continue fi # $pass = dlopen # We need an absolute path. case $ladir in [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; *) abs_ladir=`cd "$ladir" && pwd` if test -z "$abs_ladir"; then func_warning "cannot determine absolute directory name of \`$ladir'" func_warning "passing it literally to the linker, although it might fail" abs_ladir="$ladir" fi ;; esac func_basename "$lib" laname="$func_basename_result" # Find the relevant object directory and library name. if test "X$installed" = Xyes; then if test ! 
-f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then func_warning "library \`$lib' was moved." dir="$ladir" absdir="$abs_ladir" libdir="$abs_ladir" else dir="$lt_sysroot$libdir" absdir="$lt_sysroot$libdir" fi test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes else if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then dir="$ladir" absdir="$abs_ladir" # Remove this search path later func_append notinst_path " $abs_ladir" else dir="$ladir/$objdir" absdir="$abs_ladir/$objdir" # Remove this search path later func_append notinst_path " $abs_ladir" fi fi # $installed = yes func_stripname 'lib' '.la' "$laname" name=$func_stripname_result # This library was specified with -dlpreopen. if test "$pass" = dlpreopen; then if test -z "$libdir" && test "$linkmode" = prog; then func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'" fi case "$host" in # special handling for platforms with PE-DLLs. *cygwin* | *mingw* | *cegcc* ) # Linker will automatically link against shared library if both # static and shared are present. Therefore, ensure we extract # symbols from the import library if a shared library is present # (otherwise, the dlopen module name will be incorrect). We do # this by putting the import library name into $newdlprefiles. # We recover the dlopen module name by 'saving' the la file # name in a special purpose variable, and (later) extracting the # dlname from the la file. if test -n "$dlname"; then func_tr_sh "$dir/$linklib" eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" func_append newdlprefiles " $dir/$linklib" else func_append newdlprefiles " $dir/$old_library" # Keep a list of preopened convenience libraries to check # that they are being used correctly in the link pass. test -z "$libdir" && \ func_append dlpreconveniencelibs " $dir/$old_library" fi ;; * ) # Prefer using a static library (so that no silly _DYNAMIC symbols # are required to link). 
if test -n "$old_library"; then func_append newdlprefiles " $dir/$old_library" # Keep a list of preopened convenience libraries to check # that they are being used correctly in the link pass. test -z "$libdir" && \ func_append dlpreconveniencelibs " $dir/$old_library" # Otherwise, use the dlname, so that lt_dlopen finds it. elif test -n "$dlname"; then func_append newdlprefiles " $dir/$dlname" else func_append newdlprefiles " $dir/$linklib" fi ;; esac fi # $pass = dlpreopen if test -z "$libdir"; then # Link the convenience library if test "$linkmode" = lib; then deplibs="$dir/$old_library $deplibs" elif test "$linkmode,$pass" = "prog,link"; then compile_deplibs="$dir/$old_library $compile_deplibs" finalize_deplibs="$dir/$old_library $finalize_deplibs" else deplibs="$lib $deplibs" # used for prog,scan pass fi continue fi if test "$linkmode" = prog && test "$pass" != link; then func_append newlib_search_path " $ladir" deplibs="$lib $deplibs" linkalldeplibs=no if test "$link_all_deplibs" != no || test -z "$library_names" || test "$build_libtool_libs" = no; then linkalldeplibs=yes fi tmp_libs= for deplib in $dependency_libs; do case $deplib in -L*) func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result" func_append newlib_search_path " $func_resolve_sysroot_result" ;; esac # Need to link against all dependency_libs? if test "$linkalldeplibs" = yes; then deplibs="$deplib $deplibs" else # Need to hardcode shared library paths # or/and link against static libraries newdependency_libs="$deplib $newdependency_libs" fi if $opt_preserve_dup_deps ; then case "$tmp_libs " in *" $deplib "*) func_append specialdeplibs " $deplib" ;; esac fi func_append tmp_libs " $deplib" done # for deplib continue fi # $linkmode = prog... 
if test "$linkmode,$pass" = "prog,link"; then if test -n "$library_names" && { { test "$prefer_static_libs" = no || test "$prefer_static_libs,$installed" = "built,yes"; } || test -z "$old_library"; }; then # We need to hardcode the library path if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then # Make sure the rpath contains only unique directories. case "$temp_rpath:" in *"$absdir:"*) ;; *) func_append temp_rpath "$absdir:" ;; esac fi # Hardcode the library path. # Skip directories that are in the system default run-time # search path. case " $sys_lib_dlsearch_path " in *" $absdir "*) ;; *) case "$compile_rpath " in *" $absdir "*) ;; *) func_append compile_rpath " $absdir" ;; esac ;; esac case " $sys_lib_dlsearch_path " in *" $libdir "*) ;; *) case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac ;; esac fi # $linkmode,$pass = prog,link... if test "$alldeplibs" = yes && { test "$deplibs_check_method" = pass_all || { test "$build_libtool_libs" = yes && test -n "$library_names"; }; }; then # We only need to search for static libraries continue fi fi link_static=no # Whether the deplib will be linked statically use_static_libs=$prefer_static_libs if test "$use_static_libs" = built && test "$installed" = yes; then use_static_libs=no fi if test -n "$library_names" && { test "$use_static_libs" = no || test -z "$old_library"; }; then case $host in *cygwin* | *mingw* | *cegcc*) # No point in relinking DLLs because paths are not encoded func_append notinst_deplibs " $lib" need_relink=no ;; *) if test "$installed" = no; then func_append notinst_deplibs " $lib" need_relink=yes fi ;; esac # This is a shared library # Warn about portability, can't link against -module's on some # systems (darwin). Don't bleat about dlopened modules though! 
dlopenmodule="" for dlpremoduletest in $dlprefiles; do if test "X$dlpremoduletest" = "X$lib"; then dlopenmodule="$dlpremoduletest" break fi done if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then echo if test "$linkmode" = prog; then $ECHO "*** Warning: Linking the executable $output against the loadable module" else $ECHO "*** Warning: Linking the shared library $output against the loadable module" fi $ECHO "*** $linklib is not portable!" fi if test "$linkmode" = lib && test "$hardcode_into_libs" = yes; then # Hardcode the library path. # Skip directories that are in the system default run-time # search path. case " $sys_lib_dlsearch_path " in *" $absdir "*) ;; *) case "$compile_rpath " in *" $absdir "*) ;; *) func_append compile_rpath " $absdir" ;; esac ;; esac case " $sys_lib_dlsearch_path " in *" $libdir "*) ;; *) case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac ;; esac fi if test -n "$old_archive_from_expsyms_cmds"; then # figure out the soname set dummy $library_names shift realname="$1" shift libname=`eval "\\$ECHO \"$libname_spec\""` # use dlname if we got it. it's perfectly good, no? if test -n "$dlname"; then soname="$dlname" elif test -n "$soname_spec"; then # bleh windows case $host in *cygwin* | mingw* | *cegcc*) func_arith $current - $age major=$func_arith_result versuffix="-$major" ;; esac eval soname=\"$soname_spec\" else soname="$realname" fi # Make a new name for the extract_expsyms_cmds to use soroot="$soname" func_basename "$soroot" soname="$func_basename_result" func_stripname 'lib' '.dll' "$soname" newlib=libimp-$func_stripname_result.a # If the library has no export list, then create one now if test -f "$output_objdir/$soname-def"; then : else func_verbose "extracting exported symbol list from \`$soname'" func_execute_cmds "$extract_expsyms_cmds" 'exit $?' 
fi # Create $newlib if test -f "$output_objdir/$newlib"; then :; else func_verbose "generating import library for \`$soname'" func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' fi # make sure the library variables are pointing to the new library dir=$output_objdir linklib=$newlib fi # test -n "$old_archive_from_expsyms_cmds" if test "$linkmode" = prog || test "$opt_mode" != relink; then add_shlibpath= add_dir= add= lib_linked=yes case $hardcode_action in immediate | unsupported) if test "$hardcode_direct" = no; then add="$dir/$linklib" case $host in *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; *-*-sysv4*uw2*) add_dir="-L$dir" ;; *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ *-*-unixware7*) add_dir="-L$dir" ;; *-*-darwin* ) # if the lib is a (non-dlopened) module then we can not # link against it, someone is ignoring the earlier warnings if /usr/bin/file -L $add 2> /dev/null | $GREP ": [^:]* bundle" >/dev/null ; then if test "X$dlopenmodule" != "X$lib"; then $ECHO "*** Warning: lib $linklib is a module, not a shared library" if test -z "$old_library" ; then echo echo "*** And there doesn't seem to be a static archive available" echo "*** The link will probably fail, sorry" else add="$dir/$old_library" fi elif test -n "$old_library"; then add="$dir/$old_library" fi fi esac elif test "$hardcode_minus_L" = no; then case $host in *-*-sunos*) add_shlibpath="$dir" ;; esac add_dir="-L$dir" add="-l$name" elif test "$hardcode_shlibpath_var" = no; then add_shlibpath="$dir" add="-l$name" else lib_linked=no fi ;; relink) if test "$hardcode_direct" = yes && test "$hardcode_direct_absolute" = no; then add="$dir/$linklib" elif test "$hardcode_minus_L" = yes; then add_dir="-L$absdir" # Try looking first in the location we're being installed to. 
if test -n "$inst_prefix_dir"; then case $libdir in [\\/]*) func_append add_dir " -L$inst_prefix_dir$libdir" ;; esac fi add="-l$name" elif test "$hardcode_shlibpath_var" = yes; then add_shlibpath="$dir" add="-l$name" else lib_linked=no fi ;; *) lib_linked=no ;; esac if test "$lib_linked" != yes; then func_fatal_configuration "unsupported hardcode properties" fi if test -n "$add_shlibpath"; then case :$compile_shlibpath: in *":$add_shlibpath:"*) ;; *) func_append compile_shlibpath "$add_shlibpath:" ;; esac fi if test "$linkmode" = prog; then test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" test -n "$add" && compile_deplibs="$add $compile_deplibs" else test -n "$add_dir" && deplibs="$add_dir $deplibs" test -n "$add" && deplibs="$add $deplibs" if test "$hardcode_direct" != yes && test "$hardcode_minus_L" != yes && test "$hardcode_shlibpath_var" = yes; then case :$finalize_shlibpath: in *":$libdir:"*) ;; *) func_append finalize_shlibpath "$libdir:" ;; esac fi fi fi if test "$linkmode" = prog || test "$opt_mode" = relink; then add_shlibpath= add_dir= add= # Finalize command for both is simple: just hardcode it. if test "$hardcode_direct" = yes && test "$hardcode_direct_absolute" = no; then add="$libdir/$linklib" elif test "$hardcode_minus_L" = yes; then add_dir="-L$libdir" add="-l$name" elif test "$hardcode_shlibpath_var" = yes; then case :$finalize_shlibpath: in *":$libdir:"*) ;; *) func_append finalize_shlibpath "$libdir:" ;; esac add="-l$name" elif test "$hardcode_automatic" = yes; then if test -n "$inst_prefix_dir" && test -f "$inst_prefix_dir$libdir/$linklib" ; then add="$inst_prefix_dir$libdir/$linklib" else add="$libdir/$linklib" fi else # We cannot seem to hardcode it, guess we'll fake it. add_dir="-L$libdir" # Try looking first in the location we're being installed to. 
if test -n "$inst_prefix_dir"; then case $libdir in [\\/]*) func_append add_dir " -L$inst_prefix_dir$libdir" ;; esac fi add="-l$name" fi if test "$linkmode" = prog; then test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" test -n "$add" && finalize_deplibs="$add $finalize_deplibs" else test -n "$add_dir" && deplibs="$add_dir $deplibs" test -n "$add" && deplibs="$add $deplibs" fi fi elif test "$linkmode" = prog; then # Here we assume that one of hardcode_direct or hardcode_minus_L # is not unsupported. This is valid on all known static and # shared platforms. if test "$hardcode_direct" != unsupported; then test -n "$old_library" && linklib="$old_library" compile_deplibs="$dir/$linklib $compile_deplibs" finalize_deplibs="$dir/$linklib $finalize_deplibs" else compile_deplibs="-l$name -L$dir $compile_deplibs" finalize_deplibs="-l$name -L$dir $finalize_deplibs" fi elif test "$build_libtool_libs" = yes; then # Not a shared library if test "$deplibs_check_method" != pass_all; then # We're trying link a shared library against a static one # but the system doesn't support it. # Just print a warning and add the library to dependency_libs so # that the program can be linked against the static library. echo $ECHO "*** Warning: This system can not link to static lib archive $lib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have." if test "$module" = yes; then echo "*** But as you try to build a module library, libtool will still create " echo "*** a static module, that should work as long as the dlopening application" echo "*** is linked with the -dlopen flag to resolve symbols at runtime." 
if test -z "$global_symbol_pipe"; then echo echo "*** However, this would only work if libtool was able to extract symbol" echo "*** lists from a program, using \`nm' or equivalent, but libtool could" echo "*** not find such a program. So, this module is probably useless." echo "*** \`nm' from GNU binutils and a full rebuild may help." fi if test "$build_old_libs" = no; then build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi fi else deplibs="$dir/$old_library $deplibs" link_static=yes fi fi # link shared/static library? if test "$linkmode" = lib; then if test -n "$dependency_libs" && { test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes || test "$link_static" = yes; }; then # Extract -R from dependency_libs temp_deplibs= for libdir in $dependency_libs; do case $libdir in -R*) func_stripname '-R' '' "$libdir" temp_xrpath=$func_stripname_result case " $xrpath " in *" $temp_xrpath "*) ;; *) func_append xrpath " $temp_xrpath";; esac;; *) func_append temp_deplibs " $libdir";; esac done dependency_libs="$temp_deplibs" fi func_append newlib_search_path " $absdir" # Link against this library test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" # ... and its dependency_libs tmp_libs= for deplib in $dependency_libs; do newdependency_libs="$deplib $newdependency_libs" case $deplib in -L*) func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result";; *) func_resolve_sysroot "$deplib" ;; esac if $opt_preserve_dup_deps ; then case "$tmp_libs " in *" $func_resolve_sysroot_result "*) func_append specialdeplibs " $func_resolve_sysroot_result" ;; esac fi func_append tmp_libs " $func_resolve_sysroot_result" done if test "$link_all_deplibs" != no; then # Add the search paths of all dependency libraries for deplib in $dependency_libs; do path= case $deplib in -L*) path="$deplib" ;; *.la) func_resolve_sysroot "$deplib" deplib=$func_resolve_sysroot_result func_dirname "$deplib" "" "." 
dir=$func_dirname_result # We need an absolute path. case $dir in [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; *) absdir=`cd "$dir" && pwd` if test -z "$absdir"; then func_warning "cannot determine absolute directory name of \`$dir'" absdir="$dir" fi ;; esac if $GREP "^installed=no" $deplib > /dev/null; then case $host in *-*-darwin*) depdepl= eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` if test -n "$deplibrary_names" ; then for tmp in $deplibrary_names ; do depdepl=$tmp done if test -f "$absdir/$objdir/$depdepl" ; then depdepl="$absdir/$objdir/$depdepl" darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` if test -z "$darwin_install_name"; then darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` fi func_append compiler_flags " ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" func_append linker_flags " -dylib_file ${darwin_install_name}:${depdepl}" path= fi fi ;; *) path="-L$absdir/$objdir" ;; esac else eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` test -z "$libdir" && \ func_fatal_error "\`$deplib' is not a valid libtool archive" test "$absdir" != "$libdir" && \ func_warning "\`$deplib' seems to be moved" path="-L$absdir" fi ;; esac case " $deplibs " in *" $path "*) ;; *) deplibs="$path $deplibs" ;; esac done fi # link_all_deplibs != no fi # linkmode = lib done # for deplib in $libs if test "$pass" = link; then if test "$linkmode" = "prog"; then compile_deplibs="$new_inherited_linker_flags $compile_deplibs" finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" else compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` fi fi dependency_libs="$newdependency_libs" if test "$pass" = dlpreopen; then # Link the dlpreopened libraries before other libraries for deplib in $save_deplibs; do deplibs="$deplib $deplibs" done fi if test "$pass" != dlopen; then if test "$pass" != conv; 
then # Make sure lib_search_path contains only unique directories. lib_search_path= for dir in $newlib_search_path; do case "$lib_search_path " in *" $dir "*) ;; *) func_append lib_search_path " $dir" ;; esac done newlib_search_path= fi if test "$linkmode,$pass" != "prog,link"; then vars="deplibs" else vars="compile_deplibs finalize_deplibs" fi for var in $vars dependency_libs; do # Add libraries to $var in reverse order eval tmp_libs=\"\$$var\" new_libs= for deplib in $tmp_libs; do # FIXME: Pedantically, this is the right thing to do, so # that some nasty dependency loop isn't accidentally # broken: #new_libs="$deplib $new_libs" # Pragmatically, this seems to cause very few problems in # practice: case $deplib in -L*) new_libs="$deplib $new_libs" ;; -R*) ;; *) # And here is the reason: when a library appears more # than once as an explicit dependence of a library, or # is implicitly linked in more than once by the # compiler, it is considered special, and multiple # occurrences thereof are not removed. Compare this # with having the same library being listed as a # dependency of multiple other libraries: in this case, # we know (pedantically, we assume) the library does not # need to be listed more than once, so we keep only the # last copy. This is not always right, but it is rare # enough that we require users that really mean to play # such unportable linking tricks to link the library # using -Wl,-lname, so that libtool does not consider it # for duplicate removal. 
case " $specialdeplibs " in *" $deplib "*) new_libs="$deplib $new_libs" ;; *) case " $new_libs " in *" $deplib "*) ;; *) new_libs="$deplib $new_libs" ;; esac ;; esac ;; esac done tmp_libs= for deplib in $new_libs; do case $deplib in -L*) case " $tmp_libs " in *" $deplib "*) ;; *) func_append tmp_libs " $deplib" ;; esac ;; *) func_append tmp_libs " $deplib" ;; esac done eval $var=\"$tmp_libs\" done # for var fi # Last step: remove runtime libs from dependency_libs # (they stay in deplibs) tmp_libs= for i in $dependency_libs ; do case " $predeps $postdeps $compiler_lib_search_path " in *" $i "*) i="" ;; esac if test -n "$i" ; then func_append tmp_libs " $i" fi done dependency_libs=$tmp_libs done # for pass if test "$linkmode" = prog; then dlfiles="$newdlfiles" fi if test "$linkmode" = prog || test "$linkmode" = lib; then dlprefiles="$newdlprefiles" fi case $linkmode in oldlib) if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then func_warning "\`-dlopen' is ignored for archives" fi case " $deplibs" in *\ -l* | *\ -L*) func_warning "\`-l' and \`-L' are ignored for archives" ;; esac test -n "$rpath" && \ func_warning "\`-rpath' is ignored for archives" test -n "$xrpath" && \ func_warning "\`-R' is ignored for archives" test -n "$vinfo" && \ func_warning "\`-version-info/-version-number' is ignored for archives" test -n "$release" && \ func_warning "\`-release' is ignored for archives" test -n "$export_symbols$export_symbols_regex" && \ func_warning "\`-export-symbols' is ignored for archives" # Now set the variables for building old libraries. build_libtool_libs=no oldlibs="$output" func_append objs "$old_deplibs" ;; lib) # Make sure we only generate libraries of the form `libNAME.la'. 
case $outputname in lib*) func_stripname 'lib' '.la' "$outputname" name=$func_stripname_result eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" ;; *) test "$module" = no && \ func_fatal_help "libtool library \`$output' must begin with \`lib'" if test "$need_lib_prefix" != no; then # Add the "lib" prefix for modules if required func_stripname '' '.la' "$outputname" name=$func_stripname_result eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" else func_stripname '' '.la' "$outputname" libname=$func_stripname_result fi ;; esac if test -n "$objs"; then if test "$deplibs_check_method" != pass_all; then func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs" else echo $ECHO "*** Warning: Linking the shared library $output against the non-libtool" $ECHO "*** objects $objs is not portable!" func_append libobjs " $objs" fi fi test "$dlself" != no && \ func_warning "\`-dlopen self' is ignored for libtool libraries" set dummy $rpath shift test "$#" -gt 1 && \ func_warning "ignoring multiple \`-rpath's for a libtool library" install_libdir="$1" oldlibs= if test -z "$rpath"; then if test "$build_libtool_libs" = yes; then # Building a libtool convenience library. # Some compilers have problems with a `.al' extension so # convenience libraries should have the same extension an # archive normally would. oldlibs="$output_objdir/$libname.$libext $oldlibs" build_libtool_libs=convenience build_old_libs=yes fi test -n "$vinfo" && \ func_warning "\`-version-info/-version-number' is ignored for convenience libraries" test -n "$release" && \ func_warning "\`-release' is ignored for convenience libraries" else # Parse the version information argument. 
save_ifs="$IFS"; IFS=':' set dummy $vinfo 0 0 0 shift IFS="$save_ifs" test -n "$7" && \ func_fatal_help "too many parameters to \`-version-info'" # convert absolute version numbers to libtool ages # this retains compatibility with .la files and attempts # to make the code below a bit more comprehensible case $vinfo_number in yes) number_major="$1" number_minor="$2" number_revision="$3" # # There are really only two kinds -- those that # use the current revision as the major version # and those that subtract age and use age as # a minor version. But, then there is irix # which has an extra 1 added just for fun # case $version_type in # correct linux to gnu/linux during the next big refactor darwin|linux|osf|windows|none) func_arith $number_major + $number_minor current=$func_arith_result age="$number_minor" revision="$number_revision" ;; freebsd-aout|freebsd-elf|qnx|sunos) current="$number_major" revision="$number_minor" age="0" ;; irix|nonstopux) func_arith $number_major + $number_minor current=$func_arith_result age="$number_minor" revision="$number_minor" lt_irix_increment=no ;; esac ;; no) current="$1" revision="$2" age="$3" ;; esac # Check that each of the things are valid numbers. 
case $current in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "CURRENT \`$current' must be a nonnegative integer" func_fatal_error "\`$vinfo' is not valid version information" ;; esac case $revision in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "REVISION \`$revision' must be a nonnegative integer" func_fatal_error "\`$vinfo' is not valid version information" ;; esac case $age in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "AGE \`$age' must be a nonnegative integer" func_fatal_error "\`$vinfo' is not valid version information" ;; esac if test "$age" -gt "$current"; then func_error "AGE \`$age' is greater than the current interface number \`$current'" func_fatal_error "\`$vinfo' is not valid version information" fi # Calculate the version variables. major= versuffix= verstring= case $version_type in none) ;; darwin) # Like Linux, but with the current version available in # verstring for coding it into the library header func_arith $current - $age major=.$func_arith_result versuffix="$major.$age.$revision" # Darwin ld doesn't like 0 for these options... func_arith $current + 1 minor_current=$func_arith_result xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" ;; freebsd-aout) major=".$current" versuffix=".$current.$revision"; ;; freebsd-elf) major=".$current" versuffix=".$current" ;; irix | nonstopux) if test "X$lt_irix_increment" = "Xno"; then func_arith $current - $age else func_arith $current - $age + 1 fi major=$func_arith_result case $version_type in nonstopux) verstring_prefix=nonstopux ;; *) verstring_prefix=sgi ;; esac verstring="$verstring_prefix$major.$revision" # Add in all the interfaces that we are compatible with. 
loop=$revision while test "$loop" -ne 0; do func_arith $revision - $loop iface=$func_arith_result func_arith $loop - 1 loop=$func_arith_result verstring="$verstring_prefix$major.$iface:$verstring" done # Before this point, $major must not contain `.'. major=.$major versuffix="$major.$revision" ;; linux) # correct to gnu/linux during the next big refactor func_arith $current - $age major=.$func_arith_result versuffix="$major.$age.$revision" ;; osf) func_arith $current - $age major=.$func_arith_result versuffix=".$current.$age.$revision" verstring="$current.$age.$revision" # Add in all the interfaces that we are compatible with. loop=$age while test "$loop" -ne 0; do func_arith $current - $loop iface=$func_arith_result func_arith $loop - 1 loop=$func_arith_result verstring="$verstring:${iface}.0" done # Make executables depend on our current version. func_append verstring ":${current}.0" ;; qnx) major=".$current" versuffix=".$current" ;; sunos) major=".$current" versuffix=".$current.$revision" ;; windows) # Use '-' rather than '.', since we only want one # extension on DOS 8.3 filesystems. func_arith $current - $age major=$func_arith_result versuffix="-$major" ;; *) func_fatal_configuration "unknown library version type \`$version_type'" ;; esac # Clear the version info if we defaulted, and they specified a release. if test -z "$vinfo" && test -n "$release"; then major= case $version_type in darwin) # we can't check for "0.0" in archive_cmds due to quoting # problems, so we reset it completely verstring= ;; *) verstring="0.0" ;; esac if test "$need_version" = no; then versuffix= else versuffix=".0.0" fi fi # Remove version info from name if versioning should be avoided if test "$avoid_version" = yes && test "$need_version" = no; then major= versuffix= verstring="" fi # Check to see if the archive will have undefined symbols. 
if test "$allow_undefined" = yes; then if test "$allow_undefined_flag" = unsupported; then func_warning "undefined symbols not allowed in $host shared libraries" build_libtool_libs=no build_old_libs=yes fi else # Don't allow undefined symbols. allow_undefined_flag="$no_undefined_flag" fi fi func_generate_dlsyms "$libname" "$libname" "yes" func_append libobjs " $symfileobj" test "X$libobjs" = "X " && libobjs= if test "$opt_mode" != relink; then # Remove our outputs, but don't remove object files since they # may have been created when compiling PIC objects. removelist= tempremovelist=`$ECHO "$output_objdir/*"` for p in $tempremovelist; do case $p in *.$objext | *.gcno) ;; $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) if test "X$precious_files_regex" != "X"; then if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 then continue fi fi func_append removelist " $p" ;; *) ;; esac done test -n "$removelist" && \ func_show_eval "${RM}r \$removelist" fi # Now set the variables for building old libraries. if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then func_append oldlibs " $output_objdir/$libname.$libext" # Transform .lo files to .o files. oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; $lo2o" | $NL2SP` fi # Eliminate all temporary directories. #for path in $notinst_path; do # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` #done if test -n "$xrpath"; then # If the user specified any rpath flags, then add them. 
temp_xrpath= for libdir in $xrpath; do func_replace_sysroot "$libdir" func_append temp_xrpath " -R$func_replace_sysroot_result" case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac done if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then dependency_libs="$temp_xrpath $dependency_libs" fi fi # Make sure dlfiles contains only unique files that won't be dlpreopened old_dlfiles="$dlfiles" dlfiles= for lib in $old_dlfiles; do case " $dlprefiles $dlfiles " in *" $lib "*) ;; *) func_append dlfiles " $lib" ;; esac done # Make sure dlprefiles contains only unique files old_dlprefiles="$dlprefiles" dlprefiles= for lib in $old_dlprefiles; do case "$dlprefiles " in *" $lib "*) ;; *) func_append dlprefiles " $lib" ;; esac done if test "$build_libtool_libs" = yes; then if test -n "$rpath"; then case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) # these systems don't actually have a c library (as such)! ;; *-*-rhapsody* | *-*-darwin1.[012]) # Rhapsody C library is in the System framework func_append deplibs " System.ltframework" ;; *-*-netbsd*) # Don't link with libc until the a.out ld.so is fixed. ;; *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc due to us having libc/libc_r. ;; *-*-sco3.2v5* | *-*-sco5v6*) # Causes problems with __ctype ;; *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) # Compiler inserts libc in the correct place for threads to work ;; *) # Add libc to deplibs on all other systems if necessary. if test "$build_libtool_need_lc" = "yes"; then func_append deplibs " -lc" fi ;; esac fi # Transform deplibs into only deplibs that can be linked in shared. name_save=$name libname_save=$libname release_save=$release versuffix_save=$versuffix major_save=$major # I'm not sure if I'm treating the release correctly. I think # release should show up in the -l (ie -lgmp5) so we don't want to # add it in twice. Is that correct? 
release="" versuffix="" major="" newdeplibs= droppeddeps=no case $deplibs_check_method in pass_all) # Don't check for shared/static. Everything works. # This might be a little naive. We might want to check # whether the library exists or not. But this is on # osf3 & osf4 and I'm not really sure... Just # implementing what was already the behavior. newdeplibs=$deplibs ;; test_compile) # This code stresses the "libraries are programs" paradigm to its # limits. Maybe even breaks it. We compile a program, linking it # against the deplibs as a proxy for the library. Then we can check # whether they linked in statically or dynamically with ldd. $opt_dry_run || $RM conftest.c cat > conftest.c </dev/null` $nocaseglob else potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` fi for potent_lib in $potential_libs; do # Follow soft links. if ls -lLd "$potent_lib" 2>/dev/null | $GREP " -> " >/dev/null; then continue fi # The statement above tries to avoid entering an # endless loop below, in case of cyclic links. # We might still enter an endless loop, since a link # loop can be closed while we follow links, # but so what? potlib="$potent_lib" while test -h "$potlib" 2>/dev/null; do potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` case $potliblink in [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; *) potlib=`$ECHO "$potlib" | $SED 's,[^/]*$,,'`"$potliblink";; esac done if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | $SED -e 10q | $EGREP "$file_magic_regex" > /dev/null; then func_append newdeplibs " $a_deplib" a_deplib="" break 2 fi done done fi if test -n "$a_deplib" ; then droppeddeps=yes echo $ECHO "*** Warning: linker path does not have real file for library $a_deplib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. 
But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have" echo "*** because I did check the linker path looking for a file starting" if test -z "$potlib" ; then $ECHO "*** with $libname but no candidates were found. (...for file magic test)" else $ECHO "*** with $libname and none of the candidates passed a file format test" $ECHO "*** using a file magic. Last file checked: $potlib" fi fi ;; *) # Add a -L argument. func_append newdeplibs " $a_deplib" ;; esac done # Gone through all deplibs. ;; match_pattern*) set dummy $deplibs_check_method; shift match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` for a_deplib in $deplibs; do case $a_deplib in -l*) func_stripname -l '' "$a_deplib" name=$func_stripname_result if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then case " $predeps $postdeps " in *" $a_deplib "*) func_append newdeplibs " $a_deplib" a_deplib="" ;; esac fi if test -n "$a_deplib" ; then libname=`eval "\\$ECHO \"$libname_spec\""` for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do potential_libs=`ls $i/$libname[.-]* 2>/dev/null` for potent_lib in $potential_libs; do potlib="$potent_lib" # see symlink-check above in file_magic test if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ $EGREP "$match_pattern_regex" > /dev/null; then func_append newdeplibs " $a_deplib" a_deplib="" break 2 fi done done fi if test -n "$a_deplib" ; then droppeddeps=yes echo $ECHO "*** Warning: linker path does not have real file for library $a_deplib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have" echo "*** because I did check the linker path looking for a file starting" if test -z "$potlib" ; then $ECHO "*** with $libname but no candidates were found. 
(...for regex pattern test)" else $ECHO "*** with $libname and none of the candidates passed a file format test" $ECHO "*** using a regex pattern. Last file checked: $potlib" fi fi ;; *) # Add a -L argument. func_append newdeplibs " $a_deplib" ;; esac done # Gone through all deplibs. ;; none | unknown | *) newdeplibs="" tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then for i in $predeps $postdeps ; do # can't use Xsed below, because $i might contain '/' tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s,$i,,"` done fi case $tmp_deplibs in *[!\ \ ]*) echo if test "X$deplibs_check_method" = "Xnone"; then echo "*** Warning: inter-library dependencies are not supported in this platform." else echo "*** Warning: inter-library dependencies are not known to be supported." fi echo "*** All declared inter-library dependencies are being dropped." droppeddeps=yes ;; esac ;; esac versuffix=$versuffix_save major=$major_save release=$release_save libname=$libname_save name=$name_save case $host in *-*-rhapsody* | *-*-darwin1.[012]) # On Rhapsody replace the C library with the System framework newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` ;; esac if test "$droppeddeps" = yes; then if test "$module" = yes; then echo echo "*** Warning: libtool could not satisfy all declared inter-library" $ECHO "*** dependencies of module $libname. Therefore, libtool will create" echo "*** a static module, that should work as long as the dlopening" echo "*** application is linked with the -dlopen flag." if test -z "$global_symbol_pipe"; then echo echo "*** However, this would only work if libtool was able to extract symbol" echo "*** lists from a program, using \`nm' or equivalent, but libtool could" echo "*** not find such a program. So, this module is probably useless." echo "*** \`nm' from GNU binutils and a full rebuild may help." 
fi if test "$build_old_libs" = no; then oldlibs="$output_objdir/$libname.$libext" build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi else echo "*** The inter-library dependencies that have been dropped here will be" echo "*** automatically added whenever a program is linked with this library" echo "*** or is declared to -dlopen it." if test "$allow_undefined" = no; then echo echo "*** Since this library must not contain undefined symbols," echo "*** because either the platform does not support them or" echo "*** it was explicitly requested with -no-undefined," echo "*** libtool will only create a static version of it." if test "$build_old_libs" = no; then oldlibs="$output_objdir/$libname.$libext" build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi fi fi fi # Done checking deplibs! deplibs=$newdeplibs fi # Time to change all our "foo.ltframework" stuff back to "-framework foo" case $host in *-*-darwin*) newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` ;; esac # move library search paths that coincide with paths to not yet # installed libraries to the beginning of the library search list new_libs= for path in $notinst_path; do case " $new_libs " in *" -L$path/$objdir "*) ;; *) case " $deplibs " in *" -L$path/$objdir "*) func_append new_libs " -L$path/$objdir" ;; esac ;; esac done for deplib in $deplibs; do case $deplib in -L*) case " $new_libs " in *" $deplib "*) ;; *) func_append new_libs " $deplib" ;; esac ;; *) func_append new_libs " $deplib" ;; esac done deplibs="$new_libs" # All the library-specific variables (install_libdir is set above). 
library_names= old_library= dlname= # Test again, we may have decided not to build it any more if test "$build_libtool_libs" = yes; then # Remove ${wl} instances when linking with ld. # FIXME: should test the right _cmds variable. case $archive_cmds in *\$LD\ *) wl= ;; esac if test "$hardcode_into_libs" = yes; then # Hardcode the library paths hardcode_libdirs= dep_rpath= rpath="$finalize_rpath" test "$opt_mode" != relink && rpath="$compile_rpath$rpath" for libdir in $rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then func_replace_sysroot "$libdir" libdir=$func_replace_sysroot_result if test -z "$hardcode_libdirs"; then hardcode_libdirs="$libdir" else # Just accumulate the unique libdirs. case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" func_append dep_rpath " $flag" fi elif test -n "$runpath_var"; then case "$perm_rpath " in *" $libdir "*) ;; *) func_append perm_rpath " $libdir" ;; esac fi done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir="$hardcode_libdirs" eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" fi if test -n "$runpath_var" && test -n "$perm_rpath"; then # We should set the runpath_var. rpath= for dir in $perm_rpath; do func_append rpath "$dir:" done eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" fi test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" fi shlibpath="$finalize_shlibpath" test "$opt_mode" != relink && shlibpath="$compile_shlibpath$shlibpath" if test -n "$shlibpath"; then eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" fi # Get the real and link names of the library. 
eval shared_ext=\"$shrext_cmds\" eval library_names=\"$library_names_spec\" set dummy $library_names shift realname="$1" shift if test -n "$soname_spec"; then eval soname=\"$soname_spec\" else soname="$realname" fi if test -z "$dlname"; then dlname=$soname fi lib="$output_objdir/$realname" linknames= for link do func_append linknames " $link" done # Use standard objects if they are pic test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` test "X$libobjs" = "X " && libobjs= delfiles= if test -n "$export_symbols" && test -n "$include_expsyms"; then $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" export_symbols="$output_objdir/$libname.uexp" func_append delfiles " $export_symbols" fi orig_export_symbols= case $host_os in cygwin* | mingw* | cegcc*) if test -n "$export_symbols" && test -z "$export_symbols_regex"; then # exporting using user supplied symfile if test "x`$SED 1q $export_symbols`" != xEXPORTS; then # and it's NOT already a .def file. Must figure out # which of the given symbols are data symbols and tag # them as such. So, trigger use of export_symbols_cmds. # export_symbols gets reassigned inside the "prepare # the list of exported symbols" if statement, so the # include_expsyms logic still works. orig_export_symbols="$export_symbols" export_symbols= always_export_symbols=yes fi fi ;; esac # Prepare the list of exported symbols if test -z "$export_symbols"; then if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then func_verbose "generating symbol list for \`$libname.la'" export_symbols="$output_objdir/$libname.exp" $opt_dry_run || $RM $export_symbols cmds=$export_symbols_cmds save_ifs="$IFS"; IFS='~' for cmd1 in $cmds; do IFS="$save_ifs" # Take the normal branch if the nm_file_list_spec branch # doesn't work or if tool conversion is not needed. 
case $nm_file_list_spec~$to_tool_file_cmd in *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) try_normal_branch=yes eval cmd=\"$cmd1\" func_len " $cmd" len=$func_len_result ;; *) try_normal_branch=no ;; esac if test "$try_normal_branch" = yes \ && { test "$len" -lt "$max_cmd_len" \ || test "$max_cmd_len" -le -1; } then func_show_eval "$cmd" 'exit $?' skipped_export=false elif test -n "$nm_file_list_spec"; then func_basename "$output" output_la=$func_basename_result save_libobjs=$libobjs save_output=$output output=${output_objdir}/${output_la}.nm func_to_tool_file "$output" libobjs=$nm_file_list_spec$func_to_tool_file_result func_append delfiles " $output" func_verbose "creating $NM input file list: $output" for obj in $save_libobjs; do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" done > "$output" eval cmd=\"$cmd1\" func_show_eval "$cmd" 'exit $?' output=$save_output libobjs=$save_libobjs skipped_export=false else # The command line is too long to execute in one step. func_verbose "using reloadable object file for export list..." skipped_export=: # Break out early, otherwise skipped_export may be # set to false by a later but shorter cmd. break fi done IFS="$save_ifs" if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' func_show_eval '$MV "${export_symbols}T" "$export_symbols"' fi fi fi if test -n "$export_symbols" && test -n "$include_expsyms"; then tmp_export_symbols="$export_symbols" test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' fi if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then # The given exports_symbols file has to be filtered, so filter it. 
func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" # FIXME: $output_objdir/$libname.filter potentially contains lots of # 's' commands which not all seds can handle. GNU sed should be fine # though. Also, the filter scales superlinearly with the number of # global variables. join(1) would be nice here, but unfortunately # isn't a blessed tool. $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter func_append delfiles " $export_symbols $output_objdir/$libname.filter" export_symbols=$output_objdir/$libname.def $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols fi tmp_deplibs= for test_deplib in $deplibs; do case " $convenience " in *" $test_deplib "*) ;; *) func_append tmp_deplibs " $test_deplib" ;; esac done deplibs="$tmp_deplibs" if test -n "$convenience"; then if test -n "$whole_archive_flag_spec" && test "$compiler_needs_object" = yes && test -z "$libobjs"; then # extract the archives, so we have objects to list. # TODO: could optimize this to just extract one archive. whole_archive_flag_spec= fi if test -n "$whole_archive_flag_spec"; then save_libobjs=$libobjs eval libobjs=\"\$libobjs $whole_archive_flag_spec\" test "X$libobjs" = "X " && libobjs= else gentop="$output_objdir/${outputname}x" func_append generated " $gentop" func_extract_archives $gentop $convenience func_append libobjs " $func_extract_archives_result" test "X$libobjs" = "X " && libobjs= fi fi if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then eval flag=\"$thread_safe_flag_spec\" func_append linker_flags " $flag" fi # Make a backup of the uninstalled library when relinking if test "$opt_mode" = relink; then $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? fi # Do each of the archive commands. 
if test "$module" = yes && test -n "$module_cmds" ; then if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then eval test_cmds=\"$module_expsym_cmds\" cmds=$module_expsym_cmds else eval test_cmds=\"$module_cmds\" cmds=$module_cmds fi else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then eval test_cmds=\"$archive_expsym_cmds\" cmds=$archive_expsym_cmds else eval test_cmds=\"$archive_cmds\" cmds=$archive_cmds fi fi if test "X$skipped_export" != "X:" && func_len " $test_cmds" && len=$func_len_result && test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then : else # The command line is too long to link in one step, link piecewise # or, if using GNU ld and skipped_export is not :, use a linker # script. # Save the value of $output and $libobjs because we want to # use them later. If we have whole_archive_flag_spec, we # want to use save_libobjs as it was before # whole_archive_flag_spec was expanded, because we can't # assume the linker understands whole_archive_flag_spec. # This may have to be revisited, in case too many # convenience libraries get linked in and end up exceeding # the spec. if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then save_libobjs=$libobjs fi save_output=$output func_basename "$output" output_la=$func_basename_result # Clear the reloadable object creation command queue and # initialize k to one. 
test_cmds= concat_cmds= objlist= last_robj= k=1 if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then output=${output_objdir}/${output_la}.lnkscript func_verbose "creating GNU ld script: $output" echo 'INPUT (' > $output for obj in $save_libobjs do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" >> $output done echo ')' >> $output func_append delfiles " $output" func_to_tool_file "$output" output=$func_to_tool_file_result elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then output=${output_objdir}/${output_la}.lnk func_verbose "creating linker input file list: $output" : > $output set x $save_libobjs shift firstobj= if test "$compiler_needs_object" = yes; then firstobj="$1 " shift fi for obj do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" >> $output done func_append delfiles " $output" func_to_tool_file "$output" output=$firstobj\"$file_list_spec$func_to_tool_file_result\" else if test -n "$save_libobjs"; then func_verbose "creating reloadable object files..." output=$output_objdir/$output_la-${k}.$objext eval test_cmds=\"$reload_cmds\" func_len " $test_cmds" len0=$func_len_result len=$len0 # Loop over the list of objects to be linked. for obj in $save_libobjs do func_len " $obj" func_arith $len + $func_len_result len=$func_arith_result if test "X$objlist" = X || test "$len" -lt "$max_cmd_len"; then func_append objlist " $obj" else # The command $test_cmds is almost too long, add a # command to the queue. if test "$k" -eq 1 ; then # The first file doesn't have a previous command to add. reload_objs=$objlist eval concat_cmds=\"$reload_cmds\" else # All subsequent reloadable object files will link in # the last one created. 
reload_objs="$objlist $last_robj" eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" fi last_robj=$output_objdir/$output_la-${k}.$objext func_arith $k + 1 k=$func_arith_result output=$output_objdir/$output_la-${k}.$objext objlist=" $obj" func_len " $last_robj" func_arith $len0 + $func_len_result len=$func_arith_result fi done # Handle the remaining objects by creating one last # reloadable object file. All subsequent reloadable object # files will link in the last one created. test -z "$concat_cmds" || concat_cmds=$concat_cmds~ reload_objs="$objlist $last_robj" eval concat_cmds=\"\${concat_cmds}$reload_cmds\" if test -n "$last_robj"; then eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\" fi func_append delfiles " $output" else output= fi if ${skipped_export-false}; then func_verbose "generating symbol list for \`$libname.la'" export_symbols="$output_objdir/$libname.exp" $opt_dry_run || $RM $export_symbols libobjs=$output # Append the command to create the export file. test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" if test -n "$last_robj"; then eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" fi fi test -n "$save_libobjs" && func_verbose "creating a temporary reloadable object file: $output" # Loop through the commands generated above and execute them. save_ifs="$IFS"; IFS='~' for cmd in $concat_cmds; do IFS="$save_ifs" $opt_silent || { func_quote_for_expand "$cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || eval "$cmd" || { lt_exit=$? 
# Restore the uninstalled library and exit if test "$opt_mode" = relink; then ( cd "$output_objdir" && \ $RM "${realname}T" && \ $MV "${realname}U" "$realname" ) fi exit $lt_exit } done IFS="$save_ifs" if test -n "$export_symbols_regex" && ${skipped_export-false}; then func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' func_show_eval '$MV "${export_symbols}T" "$export_symbols"' fi fi if ${skipped_export-false}; then if test -n "$export_symbols" && test -n "$include_expsyms"; then tmp_export_symbols="$export_symbols" test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' fi if test -n "$orig_export_symbols"; then # The given exports_symbols file has to be filtered, so filter it. func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" # FIXME: $output_objdir/$libname.filter potentially contains lots of # 's' commands which not all seds can handle. GNU sed should be fine # though. Also, the filter scales superlinearly with the number of # global variables. join(1) would be nice here, but unfortunately # isn't a blessed tool. $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter func_append delfiles " $export_symbols $output_objdir/$libname.filter" export_symbols=$output_objdir/$libname.def $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols fi fi libobjs=$output # Restore the value of output. output=$save_output if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then eval libobjs=\"\$libobjs $whole_archive_flag_spec\" test "X$libobjs" = "X " && libobjs= fi # Expand the library linking commands again to reset the # value of $libobjs for piecewise linking. # Do each of the archive commands. 
if test "$module" = yes && test -n "$module_cmds" ; then if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then cmds=$module_expsym_cmds else cmds=$module_cmds fi else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then cmds=$archive_expsym_cmds else cmds=$archive_cmds fi fi fi if test -n "$delfiles"; then # Append the command to remove temporary files to $cmds. eval cmds=\"\$cmds~\$RM $delfiles\" fi # Add any objects from preloaded convenience libraries if test -n "$dlprefiles"; then gentop="$output_objdir/${outputname}x" func_append generated " $gentop" func_extract_archives $gentop $dlprefiles func_append libobjs " $func_extract_archives_result" test "X$libobjs" = "X " && libobjs= fi save_ifs="$IFS"; IFS='~' for cmd in $cmds; do IFS="$save_ifs" eval cmd=\"$cmd\" $opt_silent || { func_quote_for_expand "$cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || eval "$cmd" || { lt_exit=$? # Restore the uninstalled library and exit if test "$opt_mode" = relink; then ( cd "$output_objdir" && \ $RM "${realname}T" && \ $MV "${realname}U" "$realname" ) fi exit $lt_exit } done IFS="$save_ifs" # Restore the uninstalled library and exit if test "$opt_mode" = relink; then $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? if test -n "$convenience"; then if test -z "$whole_archive_flag_spec"; then func_show_eval '${RM}r "$gentop"' fi fi exit $EXIT_SUCCESS fi # Create links to the real library. for linkname in $linknames; do if test "$realname" != "$linkname"; then func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' fi done # If -module or -export-dynamic was specified, set the dlname. if test "$module" = yes || test "$export_dynamic" = yes; then # On all known operating systems, these are identical. 
dlname="$soname" fi fi ;; obj) if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then func_warning "\`-dlopen' is ignored for objects" fi case " $deplibs" in *\ -l* | *\ -L*) func_warning "\`-l' and \`-L' are ignored for objects" ;; esac test -n "$rpath" && \ func_warning "\`-rpath' is ignored for objects" test -n "$xrpath" && \ func_warning "\`-R' is ignored for objects" test -n "$vinfo" && \ func_warning "\`-version-info' is ignored for objects" test -n "$release" && \ func_warning "\`-release' is ignored for objects" case $output in *.lo) test -n "$objs$old_deplibs" && \ func_fatal_error "cannot build library object \`$output' from non-libtool objects" libobj=$output func_lo2o "$libobj" obj=$func_lo2o_result ;; *) libobj= obj="$output" ;; esac # Delete the old objects. $opt_dry_run || $RM $obj $libobj # Objects from convenience libraries. This assumes # single-version convenience libraries. Whenever we create # different ones for PIC/non-PIC, this we'll have to duplicate # the extraction. reload_conv_objs= gentop= # reload_cmds runs $LD directly, so let us get rid of # -Wl from whole_archive_flag_spec and hope we can get by with # turning comma into space.. wl= if test -n "$convenience"; then if test -n "$whole_archive_flag_spec"; then eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" reload_conv_objs=$reload_objs\ `$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` else gentop="$output_objdir/${obj}x" func_append generated " $gentop" func_extract_archives $gentop $convenience reload_conv_objs="$reload_objs $func_extract_archives_result" fi fi # If we're not building shared, we need to use non_pic_objs test "$build_libtool_libs" != yes && libobjs="$non_pic_objects" # Create the old-style object. reload_objs="$objs$old_deplibs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; /\.lib$/d; $lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test output="$obj" func_execute_cmds "$reload_cmds" 'exit $?' 
# Exit if we aren't doing a library object file. if test -z "$libobj"; then if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi exit $EXIT_SUCCESS fi if test "$build_libtool_libs" != yes; then if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi # Create an invalid libtool object if no PIC, so that we don't # accidentally link it into a program. # $show "echo timestamp > $libobj" # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? exit $EXIT_SUCCESS fi if test -n "$pic_flag" || test "$pic_mode" != default; then # Only do commands if we really have different PIC objects. reload_objs="$libobjs $reload_conv_objs" output="$libobj" func_execute_cmds "$reload_cmds" 'exit $?' fi if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi exit $EXIT_SUCCESS ;; prog) case $host in *cygwin*) func_stripname '' '.exe' "$output" output=$func_stripname_result.exe;; esac test -n "$vinfo" && \ func_warning "\`-version-info' is ignored for programs" test -n "$release" && \ func_warning "\`-release' is ignored for programs" test "$preload" = yes \ && test "$dlopen_support" = unknown \ && test "$dlopen_self" = unknown \ && test "$dlopen_self_static" = unknown && \ func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support." case $host in *-*-rhapsody* | *-*-darwin1.[012]) # On Rhapsody replace the C library is the System framework compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` ;; esac case $host in *-*-darwin*) # Don't allow lazy linking, it breaks C++ global constructors # But is supposedly fixed on 10.4 or later (yay!). 
if test "$tagname" = CXX ; then case ${MACOSX_DEPLOYMENT_TARGET-10.0} in 10.[0123]) func_append compile_command " ${wl}-bind_at_load" func_append finalize_command " ${wl}-bind_at_load" ;; esac fi # Time to change all our "foo.ltframework" stuff back to "-framework foo" compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` ;; esac # move library search paths that coincide with paths to not yet # installed libraries to the beginning of the library search list new_libs= for path in $notinst_path; do case " $new_libs " in *" -L$path/$objdir "*) ;; *) case " $compile_deplibs " in *" -L$path/$objdir "*) func_append new_libs " -L$path/$objdir" ;; esac ;; esac done for deplib in $compile_deplibs; do case $deplib in -L*) case " $new_libs " in *" $deplib "*) ;; *) func_append new_libs " $deplib" ;; esac ;; *) func_append new_libs " $deplib" ;; esac done compile_deplibs="$new_libs" func_append compile_command " $compile_deplibs" func_append finalize_command " $finalize_deplibs" if test -n "$rpath$xrpath"; then # If the user specified any rpath flags, then add them. for libdir in $rpath $xrpath; do # This is the magic to use -rpath. case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac done fi # Now hardcode the library paths rpath= hardcode_libdirs= for libdir in $compile_rpath $finalize_rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs="$libdir" else # Just accumulate the unique libdirs. 
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" func_append rpath " $flag" fi elif test -n "$runpath_var"; then case "$perm_rpath " in *" $libdir "*) ;; *) func_append perm_rpath " $libdir" ;; esac fi case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'` case :$dllsearchpath: in *":$libdir:"*) ;; ::) dllsearchpath=$libdir;; *) func_append dllsearchpath ":$libdir";; esac case :$dllsearchpath: in *":$testbindir:"*) ;; ::) dllsearchpath=$testbindir;; *) func_append dllsearchpath ":$testbindir";; esac ;; esac done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir="$hardcode_libdirs" eval rpath=\" $hardcode_libdir_flag_spec\" fi compile_rpath="$rpath" rpath= hardcode_libdirs= for libdir in $finalize_rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs="$libdir" else # Just accumulate the unique libdirs. case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" func_append rpath " $flag" fi elif test -n "$runpath_var"; then case "$finalize_perm_rpath " in *" $libdir "*) ;; *) func_append finalize_perm_rpath " $libdir" ;; esac fi done # Substitute the hardcoded libdirs into the rpath. 
if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir="$hardcode_libdirs" eval rpath=\" $hardcode_libdir_flag_spec\" fi finalize_rpath="$rpath" if test -n "$libobjs" && test "$build_old_libs" = yes; then # Transform all the library objects into standard objects. compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` fi func_generate_dlsyms "$outputname" "@PROGRAM@" "no" # template prelinking step if test -n "$prelink_cmds"; then func_execute_cmds "$prelink_cmds" 'exit $?' fi wrappers_required=yes case $host in *cegcc* | *mingw32ce*) # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. wrappers_required=no ;; *cygwin* | *mingw* ) if test "$build_libtool_libs" != yes; then wrappers_required=no fi ;; *) if test "$need_relink" = no || test "$build_libtool_libs" != yes; then wrappers_required=no fi ;; esac if test "$wrappers_required" = no; then # Replace the output file specification. compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` link_command="$compile_command$compile_rpath" # We have no uninstalled library dependencies, so finalize right now. exit_status=0 func_show_eval "$link_command" 'exit_status=$?' if test -n "$postlink_cmds"; then func_to_tool_file "$output" postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` func_execute_cmds "$postlink_cmds" 'exit $?' fi # Delete the generated files. 
if test -f "$output_objdir/${outputname}S.${objext}"; then func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"' fi exit $exit_status fi if test -n "$compile_shlibpath$finalize_shlibpath"; then compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" fi if test -n "$finalize_shlibpath"; then finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" fi compile_var= finalize_var= if test -n "$runpath_var"; then if test -n "$perm_rpath"; then # We should set the runpath_var. rpath= for dir in $perm_rpath; do func_append rpath "$dir:" done compile_var="$runpath_var=\"$rpath\$$runpath_var\" " fi if test -n "$finalize_perm_rpath"; then # We should set the runpath_var. rpath= for dir in $finalize_perm_rpath; do func_append rpath "$dir:" done finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " fi fi if test "$no_install" = yes; then # We don't need to create a wrapper script. link_command="$compile_var$compile_command$compile_rpath" # Replace the output file specification. link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` # Delete the old output file. $opt_dry_run || $RM $output # Link the executable and exit func_show_eval "$link_command" 'exit $?' if test -n "$postlink_cmds"; then func_to_tool_file "$output" postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` func_execute_cmds "$postlink_cmds" 'exit $?' 
fi exit $EXIT_SUCCESS fi if test "$hardcode_action" = relink; then # Fast installation is not supported link_command="$compile_var$compile_command$compile_rpath" relink_command="$finalize_var$finalize_command$finalize_rpath" func_warning "this platform does not like uninstalled shared libraries" func_warning "\`$output' will be relinked during installation" else if test "$fast_install" != no; then link_command="$finalize_var$compile_command$finalize_rpath" if test "$fast_install" = yes; then relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` else # fast_install is set to needless relink_command= fi else link_command="$compile_var$compile_command$compile_rpath" relink_command="$finalize_var$finalize_command$finalize_rpath" fi fi # Replace the output file specification. link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` # Delete the old output files. $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname func_show_eval "$link_command" 'exit $?' if test -n "$postlink_cmds"; then func_to_tool_file "$output_objdir/$outputname" postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` func_execute_cmds "$postlink_cmds" 'exit $?' fi # Now create the wrapper script. func_verbose "creating $output" # Quote the relink command for shipping. 
if test -n "$relink_command"; then # Preserve any variables that may affect compiler behavior for var in $variables_saved_for_relink; do if eval test -z \"\${$var+set}\"; then relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" elif eval var_value=\$$var; test -z "$var_value"; then relink_command="$var=; export $var; $relink_command" else func_quote_for_eval "$var_value" relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" fi done relink_command="(cd `pwd`; $relink_command)" relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` fi # Only actually do things if not in dry run mode. $opt_dry_run || { # win32 will think the script is a binary if it has # a .exe suffix, so we strip it off here. case $output in *.exe) func_stripname '' '.exe' "$output" output=$func_stripname_result ;; esac # test for cygwin because mv fails w/o .exe extensions case $host in *cygwin*) exeext=.exe func_stripname '' '.exe' "$outputname" outputname=$func_stripname_result ;; *) exeext= ;; esac case $host in *cygwin* | *mingw* ) func_dirname_and_basename "$output" "" "." output_name=$func_basename_result output_path=$func_dirname_result cwrappersource="$output_path/$objdir/lt-$output_name.c" cwrapper="$output_path/$output_name.exe" $RM $cwrappersource $cwrapper trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 func_emit_cwrapperexe_src > $cwrappersource # The wrapper executable is built using the $host compiler, # because it contains $host paths and files. If cross- # compiling, it, like the target executable, must be # executed on the $host or under an emulation environment. 
$opt_dry_run || { $LTCC $LTCFLAGS -o $cwrapper $cwrappersource $STRIP $cwrapper } # Now, create the wrapper script for func_source use: func_ltwrapper_scriptname $cwrapper $RM $func_ltwrapper_scriptname_result trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 $opt_dry_run || { # note: this script will not be executed, so do not chmod. if test "x$build" = "x$host" ; then $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result else func_emit_wrapper no > $func_ltwrapper_scriptname_result fi } ;; * ) $RM $output trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 func_emit_wrapper no > $output chmod +x $output ;; esac } exit $EXIT_SUCCESS ;; esac # See if we need to build an old-fashioned archive. for oldlib in $oldlibs; do if test "$build_libtool_libs" = convenience; then oldobjs="$libobjs_save $symfileobj" addlibs="$convenience" build_libtool_libs=no else if test "$build_libtool_libs" = module; then oldobjs="$libobjs_save" build_libtool_libs=no else oldobjs="$old_deplibs $non_pic_objects" if test "$preload" = yes && test -f "$symfileobj"; then func_append oldobjs " $symfileobj" fi fi addlibs="$old_convenience" fi if test -n "$addlibs"; then gentop="$output_objdir/${outputname}x" func_append generated " $gentop" func_extract_archives $gentop $addlibs func_append oldobjs " $func_extract_archives_result" fi # Do each command in the archive commands. if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then cmds=$old_archive_from_new_cmds else # Add any objects from preloaded convenience libraries if test -n "$dlprefiles"; then gentop="$output_objdir/${outputname}x" func_append generated " $gentop" func_extract_archives $gentop $dlprefiles func_append oldobjs " $func_extract_archives_result" fi # POSIX demands no paths to be encoded in archives. 
We have # to avoid creating archives with duplicate basenames if we # might have to extract them afterwards, e.g., when creating a # static archive out of a convenience library, or when linking # the entirety of a libtool archive into another (currently # not supported by libtool). if (for obj in $oldobjs do func_basename "$obj" $ECHO "$func_basename_result" done | sort | sort -uc >/dev/null 2>&1); then : else echo "copying selected object files to avoid basename conflicts..." gentop="$output_objdir/${outputname}x" func_append generated " $gentop" func_mkdir_p "$gentop" save_oldobjs=$oldobjs oldobjs= counter=1 for obj in $save_oldobjs do func_basename "$obj" objbase="$func_basename_result" case " $oldobjs " in " ") oldobjs=$obj ;; *[\ /]"$objbase "*) while :; do # Make sure we don't pick an alternate name that also # overlaps. newobj=lt$counter-$objbase func_arith $counter + 1 counter=$func_arith_result case " $oldobjs " in *[\ /]"$newobj "*) ;; *) if test ! -f "$gentop/$newobj"; then break; fi ;; esac done func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" func_append oldobjs " $gentop/$newobj" ;; *) func_append oldobjs " $obj" ;; esac done fi func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 tool_oldlib=$func_to_tool_file_result eval cmds=\"$old_archive_cmds\" func_len " $cmds" len=$func_len_result if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then cmds=$old_archive_cmds elif test -n "$archiver_list_spec"; then func_verbose "using command file archive linking..." for obj in $oldobjs do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" done > $output_objdir/$libname.libcmd func_to_tool_file "$output_objdir/$libname.libcmd" oldobjs=" $archiver_list_spec$func_to_tool_file_result" cmds=$old_archive_cmds else # the command line is too long to link in one step, link in parts func_verbose "using piecewise archive linking..." 
save_RANLIB=$RANLIB RANLIB=: objlist= concat_cmds= save_oldobjs=$oldobjs oldobjs= # Is there a better way of finding the last object in the list? for obj in $save_oldobjs do last_oldobj=$obj done eval test_cmds=\"$old_archive_cmds\" func_len " $test_cmds" len0=$func_len_result len=$len0 for obj in $save_oldobjs do func_len " $obj" func_arith $len + $func_len_result len=$func_arith_result func_append objlist " $obj" if test "$len" -lt "$max_cmd_len"; then : else # the above command should be used before it gets too long oldobjs=$objlist if test "$obj" = "$last_oldobj" ; then RANLIB=$save_RANLIB fi test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" objlist= len=$len0 fi done RANLIB=$save_RANLIB oldobjs=$objlist if test "X$oldobjs" = "X" ; then eval cmds=\"\$concat_cmds\" else eval cmds=\"\$concat_cmds~\$old_archive_cmds\" fi fi fi func_execute_cmds "$cmds" 'exit $?' done test -n "$generated" && \ func_show_eval "${RM}r$generated" # Now create the libtool archive. case $output in *.la) old_library= test "$build_old_libs" = yes && old_library="$libname.$libext" func_verbose "creating $output" # Preserve any variables that may affect compiler behavior for var in $variables_saved_for_relink; do if eval test -z \"\${$var+set}\"; then relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" elif eval var_value=\$$var; test -z "$var_value"; then relink_command="$var=; export $var; $relink_command" else func_quote_for_eval "$var_value" relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" fi done # Quote the link command for shipping. relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` if test "$hardcode_automatic" = yes ; then relink_command= fi # Only create the output if not a dry run. 
$opt_dry_run || { for installed in no yes; do if test "$installed" = yes; then if test -z "$install_libdir"; then break fi output="$output_objdir/$outputname"i # Replace all uninstalled libtool libraries with the installed ones newdependency_libs= for deplib in $dependency_libs; do case $deplib in *.la) func_basename "$deplib" name="$func_basename_result" func_resolve_sysroot "$deplib" eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` test -z "$libdir" && \ func_fatal_error "\`$deplib' is not a valid libtool archive" func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" ;; -L*) func_stripname -L '' "$deplib" func_replace_sysroot "$func_stripname_result" func_append newdependency_libs " -L$func_replace_sysroot_result" ;; -R*) func_stripname -R '' "$deplib" func_replace_sysroot "$func_stripname_result" func_append newdependency_libs " -R$func_replace_sysroot_result" ;; *) func_append newdependency_libs " $deplib" ;; esac done dependency_libs="$newdependency_libs" newdlfiles= for lib in $dlfiles; do case $lib in *.la) func_basename "$lib" name="$func_basename_result" eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` test -z "$libdir" && \ func_fatal_error "\`$lib' is not a valid libtool archive" func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" ;; *) func_append newdlfiles " $lib" ;; esac done dlfiles="$newdlfiles" newdlprefiles= for lib in $dlprefiles; do case $lib in *.la) # Only pass preopened files to the pseudo-archive (for # eventual linking with the app. 
that links it) if we # didn't already link the preopened objects directly into # the library: func_basename "$lib" name="$func_basename_result" eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` test -z "$libdir" && \ func_fatal_error "\`$lib' is not a valid libtool archive" func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" ;; esac done dlprefiles="$newdlprefiles" else newdlfiles= for lib in $dlfiles; do case $lib in [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; *) abs=`pwd`"/$lib" ;; esac func_append newdlfiles " $abs" done dlfiles="$newdlfiles" newdlprefiles= for lib in $dlprefiles; do case $lib in [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; *) abs=`pwd`"/$lib" ;; esac func_append newdlprefiles " $abs" done dlprefiles="$newdlprefiles" fi $RM $output # place dlname in correct position for cygwin # In fact, it would be nice if we could use this code for all target # systems that can't hard-code library paths into their executables # and that have no shared library path variable independent of PATH, # but it turns out we can't easily determine that from inspecting # libtool variables, so we have to hard-code the OSs to which it # applies here; at the moment, that means platforms that use the PE # object format with DLL files. See the long comment at the top of # tests/bindir.at for full details. tdlname=$dlname case $host,$output,$installed,$module,$dlname in *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) # If a -bindir argument was supplied, place the dll there. if test "x$bindir" != x ; then func_relative_path "$install_libdir" "$bindir" tdlname=$func_relative_path_result$dlname else # Otherwise fall back on heuristic. tdlname=../bin/$dlname fi ;; esac $ECHO > $output "\ # $outputname - a libtool library file # Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION # # Please DO NOT delete this file! # It is necessary for linking the library. # The name that we can dlopen(3). dlname='$tdlname' # Names of this library. 
library_names='$library_names' # The name of the static archive. old_library='$old_library' # Linker flags that can not go in dependency_libs. inherited_linker_flags='$new_inherited_linker_flags' # Libraries that this one depends upon. dependency_libs='$dependency_libs' # Names of additional weak libraries provided by this library weak_library_names='$weak_libs' # Version information for $libname. current=$current age=$age revision=$revision # Is this an already installed library? installed=$installed # Should we warn about portability when linking against -modules? shouldnotlink=$module # Files to dlopen/dlpreopen dlopen='$dlfiles' dlpreopen='$dlprefiles' # Directory that this library needs to be installed in: libdir='$install_libdir'" if test "$installed" = no && test "$need_relink" = yes; then $ECHO >> $output "\ relink_command=\"$relink_command\"" fi done } # Do a symbolic link so that the libtool archive can be found in # LD_LIBRARY_PATH before the program is installed. func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' ;; esac exit $EXIT_SUCCESS } { test "$opt_mode" = link || test "$opt_mode" = relink; } && func_mode_link ${1+"$@"} # func_mode_uninstall arg... func_mode_uninstall () { $opt_debug RM="$nonopt" files= rmforce= exit_status=0 # This variable tells wrapper scripts just to set variables rather # than running their programs. libtool_install_magic="$magic" for arg do case $arg in -f) func_append RM " $arg"; rmforce=yes ;; -*) func_append RM " $arg" ;; *) func_append files " $arg" ;; esac done test -z "$RM" && \ func_fatal_help "you must specify an RM program" rmdirs= for file in $files; do func_dirname "$file" "" "." 
dir="$func_dirname_result" if test "X$dir" = X.; then odir="$objdir" else odir="$dir/$objdir" fi func_basename "$file" name="$func_basename_result" test "$opt_mode" = uninstall && odir="$dir" # Remember odir for removal later, being careful to avoid duplicates if test "$opt_mode" = clean; then case " $rmdirs " in *" $odir "*) ;; *) func_append rmdirs " $odir" ;; esac fi # Don't error if the file doesn't exist and rm -f was used. if { test -L "$file"; } >/dev/null 2>&1 || { test -h "$file"; } >/dev/null 2>&1 || test -f "$file"; then : elif test -d "$file"; then exit_status=1 continue elif test "$rmforce" = yes; then continue fi rmfiles="$file" case $name in *.la) # Possibly a libtool archive, so verify it. if func_lalib_p "$file"; then func_source $dir/$name # Delete the libtool libraries and symlinks. for n in $library_names; do func_append rmfiles " $odir/$n" done test -n "$old_library" && func_append rmfiles " $odir/$old_library" case "$opt_mode" in clean) case " $library_names " in *" $dlname "*) ;; *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; esac test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" ;; uninstall) if test -n "$library_names"; then # Do each command in the postuninstall commands. func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' fi if test -n "$old_library"; then # Do each command in the old_postuninstall commands. func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' fi # FIXME: should reinstall the best remaining shared library. ;; esac fi ;; *.lo) # Possibly a libtool object, so verify it. if func_lalib_p "$file"; then # Read the .lo file func_source $dir/$name # Add PIC object to the list of files to remove. if test -n "$pic_object" && test "$pic_object" != none; then func_append rmfiles " $dir/$pic_object" fi # Add non-PIC object to the list of files to remove. 
if test -n "$non_pic_object" && test "$non_pic_object" != none; then func_append rmfiles " $dir/$non_pic_object" fi fi ;; *) if test "$opt_mode" = clean ; then noexename=$name case $file in *.exe) func_stripname '' '.exe' "$file" file=$func_stripname_result func_stripname '' '.exe' "$name" noexename=$func_stripname_result # $file with .exe has already been added to rmfiles, # add $file without .exe func_append rmfiles " $file" ;; esac # Do a test to see if this is a libtool program. if func_ltwrapper_p "$file"; then if func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" relink_command= func_source $func_ltwrapper_scriptname_result func_append rmfiles " $func_ltwrapper_scriptname_result" else relink_command= func_source $dir/$noexename fi # note $name still contains .exe if it was in $file originally # as does the version of $file that was added into $rmfiles func_append rmfiles " $odir/$name $odir/${name}S.${objext}" if test "$fast_install" = yes && test -n "$relink_command"; then func_append rmfiles " $odir/lt-$name" fi if test "X$noexename" != "X$name" ; then func_append rmfiles " $odir/lt-${noexename}.c" fi fi fi ;; esac func_show_eval "$RM $rmfiles" 'exit_status=1' done # Try to remove the ${objdir}s in the directories where we deleted files for dir in $rmdirs; do if test -d "$dir"; then func_show_eval "rmdir $dir >/dev/null 2>&1" fi done exit $exit_status } { test "$opt_mode" = uninstall || test "$opt_mode" = clean; } && func_mode_uninstall ${1+"$@"} test -z "$opt_mode" && { help="$generic_help" func_fatal_help "you must specify a MODE" } test -z "$exec_cmd" && \ func_fatal_help "invalid operation mode \`$opt_mode'" if test -n "$exec_cmd"; then eval exec "$exec_cmd" exit $EXIT_FAILURE fi exit $exit_status # The TAGs below are defined such that we never get into a situation # in which we disable both kinds of libraries. 
Given conflicting # choices, we go for a static library, that is the most portable, # since we can't tell whether shared libraries were disabled because # the user asked for that or because the platform doesn't support # them. This is particularly important on AIX, because we don't # support having both static and shared libraries enabled at the same # time on that platform, so we default to a shared-only configuration. # If a disable-shared tag is given, we'll fallback to a static-only # configuration. But we'll never go from static-only to shared-only. # ### BEGIN LIBTOOL TAG CONFIG: disable-shared build_libtool_libs=no build_old_libs=yes # ### END LIBTOOL TAG CONFIG: disable-shared # ### BEGIN LIBTOOL TAG CONFIG: disable-static build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` # ### END LIBTOOL TAG CONFIG: disable-static # Local Variables: # mode:shell-script # sh-indentation:2 # End: # vi:sw=2 libstoragemgmt-1.2.3/build-aux/install-sh0000755000175000017500000003325512540163524015366 00000000000000#!/bin/sh # install - install a program, script, or datafile scriptversion=2011-11-20.07; # UTC # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the # following copyright and license. # # Copyright (C) 1994 X Consortium # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- # TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # Except as contained in this notice, the name of the X Consortium shall not # be used in advertising or otherwise to promote the sale, use or other deal- # ings in this Software without prior written authorization from the X Consor- # tium. # # # FSF changes to this file are in the public domain. # # Calling this script install-sh is preferred over install.sh, to prevent # 'make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. nl=' ' IFS=" "" $nl" # set DOITPROG to echo to test this script # Don't use :- since 4.3BSD and earlier shells don't like it. doit=${DOITPROG-} if test -z "$doit"; then doit_exec=exec else doit_exec=$doit fi # Put in absolute file names if you don't have them in your path; # or use environment vars. chgrpprog=${CHGRPPROG-chgrp} chmodprog=${CHMODPROG-chmod} chownprog=${CHOWNPROG-chown} cmpprog=${CMPPROG-cmp} cpprog=${CPPROG-cp} mkdirprog=${MKDIRPROG-mkdir} mvprog=${MVPROG-mv} rmprog=${RMPROG-rm} stripprog=${STRIPPROG-strip} posix_glob='?' initialize_posix_glob=' test "$posix_glob" != "?" || { if (set -f) 2>/dev/null; then posix_glob= else posix_glob=: fi } ' posix_mkdir= # Desired mode of installed file. mode=0755 chgrpcmd= chmodcmd=$chmodprog chowncmd= mvcmd=$mvprog rmcmd="$rmprog -f" stripcmd= src= dst= dir_arg= dst_arg= copy_on_change=false no_target_directory= usage="\ Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... 
DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... -d DIRECTORIES... In the 1st form, copy SRCFILE to DSTFILE. In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: --help display this help and exit. --version display version info and exit. -c (ignored) -C install only if different (preserve the last data modification time) -d create directories instead of installing files. -g GROUP $chgrpprog installed files to GROUP. -m MODE $chmodprog installed files to MODE. -o USER $chownprog installed files to USER. -s $stripprog installed files. -t DIRECTORY install into DIRECTORY. -T report an error if DSTFILE is a directory. Environment variables override the default commands: CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG " while test $# -ne 0; do case $1 in -c) ;; -C) copy_on_change=true;; -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" shift;; --help) echo "$usage"; exit $?;; -m) mode=$2 case $mode in *' '* | *' '* | *' '* | *'*'* | *'?'* | *'['*) echo "$0: invalid mode: $mode" >&2 exit 1;; esac shift;; -o) chowncmd="$chownprog $2" shift;; -s) stripcmd=$stripprog;; -t) dst_arg=$2 # Protect names problematic for 'test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac shift;; -T) no_target_directory=true;; --version) echo "$0 $scriptversion"; exit $?;; --) shift break;; -*) echo "$0: invalid option: $1" >&2 exit 1;; *) break;; esac shift done if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. # Otherwise, the last argument is the destination. Remove it from $@. for arg do if test -n "$dst_arg"; then # $@ is not empty: it contains at least $arg. set fnord "$@" "$dst_arg" shift # fnord fi shift # arg dst_arg=$arg # Protect names problematic for 'test' and other utilities. 
case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac done fi if test $# -eq 0; then if test -z "$dir_arg"; then echo "$0: no input file specified." >&2 exit 1 fi # It's OK to call 'install-sh -d' without argument. # This can happen when creating conditional directories. exit 0 fi if test -z "$dir_arg"; then do_exit='(exit $ret); exit $ret' trap "ret=129; $do_exit" 1 trap "ret=130; $do_exit" 2 trap "ret=141; $do_exit" 13 trap "ret=143; $do_exit" 15 # Set umask so as not to create temps with too-generous modes. # However, 'strip' requires both read and write access to temps. case $mode in # Optimize common cases. *644) cp_umask=133;; *755) cp_umask=22;; *[0-7]) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw='% 200' fi cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; *) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw=,u+rw fi cp_umask=$mode$u_plus_rw;; esac fi for src do # Protect names problematic for 'test' and other utilities. case $src in -* | [=\(\)!]) src=./$src;; esac if test -n "$dir_arg"; then dst=$src dstdir=$dst test -d "$dstdir" dstdir_status=$? else # Waiting for this to be detected by the "$cpprog $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if test ! -f "$src" && test ! -d "$src"; then echo "$0: $src does not exist." >&2 exit 1 fi if test -z "$dst_arg"; then echo "$0: no destination specified." >&2 exit 1 fi dst=$dst_arg # If destination is a directory, append the input filename; won't work # if double slashes aren't ignored. if test -d "$dst"; then if test -n "$no_target_directory"; then echo "$0: $dst_arg: Is a directory" >&2 exit 1 fi dstdir=$dst dst=$dstdir/`basename "$src"` dstdir_status=0 else # Prefer dirname, but fall back on a substitute if dirname fails. dstdir=` (dirname "$dst") 2>/dev/null || expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$dst" : 'X\(//\)[^/]' \| \ X"$dst" : 'X\(//\)$' \| \ X"$dst" : 'X\(/\)' \| . 
2>/dev/null || echo X"$dst" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q' ` test -d "$dstdir" dstdir_status=$? fi fi obsolete_mkdir_used=false if test $dstdir_status != 0; then case $posix_mkdir in '') # Create intermediate dirs using mode 755 as modified by the umask. # This is like FreeBSD 'install' as of 1997-10-28. umask=`umask` case $stripcmd.$umask in # Optimize common cases. *[2367][2367]) mkdir_umask=$umask;; .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; *[0-7]) mkdir_umask=`expr $umask + 22 \ - $umask % 100 % 40 + $umask % 20 \ - $umask % 10 % 4 + $umask % 2 `;; *) mkdir_umask=$umask,go-w;; esac # With -d, create the new directory with the user-specified mode. # Otherwise, rely on $mkdir_umask. if test -n "$dir_arg"; then mkdir_mode=-m$mode else mkdir_mode= fi posix_mkdir=false case $umask in *[123567][0-7][0-7]) # POSIX mkdir -p sets u+wx bits regardless of umask, which # is incompatible with FreeBSD 'install' when (umask & 300) != 0. ;; *) tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 if (umask $mkdir_umask && exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 then if test -z "$dir_arg" || { # Check for POSIX incompatibilities with -m. # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or # other-writable bit of parent directory when it shouldn't. # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. ls_ld_tmpdir=`ls -ld "$tmpdir"` case $ls_ld_tmpdir in d????-?r-*) different_mode=700;; d????-?--*) different_mode=755;; *) false;; esac && $mkdirprog -m$different_mode -p -- "$tmpdir" && { ls_ld_tmpdir_1=`ls -ld "$tmpdir"` test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" } } then posix_mkdir=: fi rmdir "$tmpdir/d" "$tmpdir" else # Remove any dirs left behind by ancient mkdir implementations. 
rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null fi trap '' 0;; esac;; esac if $posix_mkdir && ( umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" ) then : else # The umask is ridiculous, or mkdir does not conform to POSIX, # or it failed possibly due to a race condition. Create the # directory the slow way, step by step, checking for races as we go. case $dstdir in /*) prefix='/';; [-=\(\)!]*) prefix='./';; *) prefix='';; esac eval "$initialize_posix_glob" oIFS=$IFS IFS=/ $posix_glob set -f set fnord $dstdir shift $posix_glob set +f IFS=$oIFS prefixes= for d do test X"$d" = X && continue prefix=$prefix$d if test -d "$prefix"; then prefixes= else if $posix_mkdir; then (umask=$mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break # Don't fail if two instances are running concurrently. test -d "$prefix" || exit 1 else case $prefix in *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; *) qprefix=$prefix;; esac prefixes="$prefixes '$qprefix'" fi fi prefix=$prefix/ done if test -n "$prefixes"; then # Don't fail if two instances are running concurrently. (umask $mkdir_umask && eval "\$doit_exec \$mkdirprog $prefixes") || test -d "$dstdir" || exit 1 obsolete_mkdir_used=true fi fi fi if test -n "$dir_arg"; then { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 else # Make a couple of temp file names in the proper directory. dsttmp=$dstdir/_inst.$$_ rmtmp=$dstdir/_rm.$$_ # Trap to clean up those temp files at exit. trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 # Copy the file name to the temp name. (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && # and set any options; do chmod last to preserve setuid bits. # # If any of these fail, we abort the whole thing. 
If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. # { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && # If -C, don't bother to copy if it wouldn't change the file. if $copy_on_change && old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && eval "$initialize_posix_glob" && $posix_glob set -f && set X $old && old=:$2:$4:$5:$6 && set X $new && new=:$2:$4:$5:$6 && $posix_glob set +f && test "$old" = "$new" && $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 then rm -f "$dsttmp" else # Rename the file to the real destination. $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || # The rename failed, perhaps because mv can't rename something else # to itself, or perhaps because mv is so ancient that it does not # support -f. { # Now remove or move aside any old file at destination location. # We try this two ways since rm can't unlink itself on some # systems and the destination file might be busy for other # reasons. In this case, the final cleanup might fail but the new # file should still install successfully. { test ! -f "$dst" || $doit $rmcmd -f "$dst" 2>/dev/null || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } } || { echo "$0: cannot unlink or rename $dst" >&2 (exit 1); exit 1 } } && # Now rename the file to the real destination. 
$doit $mvcmd "$dsttmp" "$dst" } fi || exit 1 trap '' 0 fi done # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: libstoragemgmt-1.2.3/build-aux/py-compile0000755000175000017500000001107612540163524015363 00000000000000#!/bin/sh # py-compile - Compile a Python program scriptversion=2011-06-08.12; # UTC # Copyright (C) 2000-2013 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # This file is maintained in Automake, please report # bugs to or send patches to # . if [ -z "$PYTHON" ]; then PYTHON=python fi me=py-compile usage_error () { echo "$me: $*" >&2 echo "Try '$me --help' for more information." 
>&2 exit 1 } basedir= destdir= while test $# -ne 0; do case "$1" in --basedir) if test $# -lt 2; then usage_error "option '--basedir' requires an argument" else basedir=$2 fi shift ;; --destdir) if test $# -lt 2; then usage_error "option '--destdir' requires an argument" else destdir=$2 fi shift ;; -h|--help) cat <<\EOF Usage: py-compile [--help] [--version] [--basedir DIR] [--destdir DIR] FILES..." Byte compile some python scripts FILES. Use --destdir to specify any leading directory path to the FILES that you don't want to include in the byte compiled file. Specify --basedir for any additional path information you do want to be shown in the byte compiled file. Example: py-compile --destdir /tmp/pkg-root --basedir /usr/share/test test.py test2.py Report bugs to . EOF exit $? ;; -v|--version) echo "$me $scriptversion" exit $? ;; --) shift break ;; -*) usage_error "unrecognized option '$1'" ;; *) break ;; esac shift done files=$* if test -z "$files"; then usage_error "no files given" fi # if basedir was given, then it should be prepended to filenames before # byte compilation. if [ -z "$basedir" ]; then pathtrans="path = file" else pathtrans="path = os.path.join('$basedir', file)" fi # if destdir was given, then it needs to be prepended to the filename to # byte compile but not go into the compiled file. if [ -z "$destdir" ]; then filetrans="filepath = path" else filetrans="filepath = os.path.normpath('$destdir' + os.sep + path)" fi $PYTHON -c " import sys, os, py_compile, imp files = '''$files''' sys.stdout.write('Byte-compiling python modules...\n') for file in files.split(): $pathtrans $filetrans if not os.path.exists(filepath) or not (len(filepath) >= 3 and filepath[-3:] == '.py'): continue sys.stdout.write(file) sys.stdout.flush() if hasattr(imp, 'get_tag'): py_compile.compile(filepath, imp.cache_from_source(filepath), path) else: py_compile.compile(filepath, filepath + 'c', path) sys.stdout.write('\n')" || exit $? 
# this will fail for python < 1.5, but that doesn't matter ... $PYTHON -O -c " import sys, os, py_compile, imp # pypy does not use .pyo optimization if hasattr(sys, 'pypy_translation_info'): sys.exit(0) files = '''$files''' sys.stdout.write('Byte-compiling python modules (optimized versions) ...\n') for file in files.split(): $pathtrans $filetrans if not os.path.exists(filepath) or not (len(filepath) >= 3 and filepath[-3:] == '.py'): continue sys.stdout.write(file) sys.stdout.flush() if hasattr(imp, 'get_tag'): py_compile.compile(filepath, imp.cache_from_source(filepath, False), path) else: py_compile.compile(filepath, filepath + 'o', path) sys.stdout.write('\n')" 2>/dev/null || : # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: libstoragemgmt-1.2.3/build-aux/compile0000755000175000017500000001624512540163524014740 00000000000000#! /bin/sh # Wrapper for compilers which do not understand '-c -o'. scriptversion=2012-10-14.11; # UTC # Copyright (C) 1999-2013 Free Software Foundation, Inc. # Written by Tom Tromey . # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # This file is maintained in Automake, please report # bugs to or send patches to # . nl=' ' # We need space, tab and new line, in precisely that order. Quoting is # there to prevent tools from complaining about whitespace usage. IFS=" "" $nl" file_conv= # func_file_conv build_file lazy # Convert a $build file to $host form and store it in $file # Currently only supports Windows hosts. If the determined conversion # type is listed in (the comma separated) LAZY, no conversion will # take place. func_file_conv () { file=$1 case $file in / | /[!/]*) # absolute file, and not a UNC file if test -z "$file_conv"; then # lazily determine how to convert abs files case `uname -s` in MINGW*) file_conv=mingw ;; CYGWIN*) file_conv=cygwin ;; *) file_conv=wine ;; esac fi case $file_conv/,$2, in *,$file_conv,*) ;; mingw/*) file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` ;; cygwin/*) file=`cygpath -m "$file" || echo "$file"` ;; wine/*) file=`winepath -w "$file" || echo "$file"` ;; esac ;; esac } # func_cl_dashL linkdir # Make cl look for libraries in LINKDIR func_cl_dashL () { func_file_conv "$1" if test -z "$lib_path"; then lib_path=$file else lib_path="$lib_path;$file" fi linker_opts="$linker_opts -LIBPATH:$file" } # func_cl_dashl library # Do a library search-path lookup for cl func_cl_dashl () { lib=$1 found=no save_IFS=$IFS IFS=';' for dir in $lib_path $LIB do IFS=$save_IFS if $shared && test -f "$dir/$lib.dll.lib"; then found=yes lib=$dir/$lib.dll.lib break fi if test -f "$dir/$lib.lib"; then found=yes lib=$dir/$lib.lib break fi if test -f "$dir/lib$lib.a"; then found=yes lib=$dir/lib$lib.a break fi done IFS=$save_IFS if test "$found" != yes; then lib=$lib.lib fi } # func_cl_wrapper cl arg... 
# Adjust compile command to suit cl func_cl_wrapper () { # Assume a capable shell lib_path= shared=: linker_opts= for arg do if test -n "$eat"; then eat= else case $1 in -o) # configure might choose to run compile as 'compile cc -o foo foo.c'. eat=1 case $2 in *.o | *.[oO][bB][jJ]) func_file_conv "$2" set x "$@" -Fo"$file" shift ;; *) func_file_conv "$2" set x "$@" -Fe"$file" shift ;; esac ;; -I) eat=1 func_file_conv "$2" mingw set x "$@" -I"$file" shift ;; -I*) func_file_conv "${1#-I}" mingw set x "$@" -I"$file" shift ;; -l) eat=1 func_cl_dashl "$2" set x "$@" "$lib" shift ;; -l*) func_cl_dashl "${1#-l}" set x "$@" "$lib" shift ;; -L) eat=1 func_cl_dashL "$2" ;; -L*) func_cl_dashL "${1#-L}" ;; -static) shared=false ;; -Wl,*) arg=${1#-Wl,} save_ifs="$IFS"; IFS=',' for flag in $arg; do IFS="$save_ifs" linker_opts="$linker_opts $flag" done IFS="$save_ifs" ;; -Xlinker) eat=1 linker_opts="$linker_opts $2" ;; -*) set x "$@" "$1" shift ;; *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) func_file_conv "$1" set x "$@" -Tp"$file" shift ;; *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) func_file_conv "$1" mingw set x "$@" "$file" shift ;; *) set x "$@" "$1" shift ;; esac fi shift done if test -n "$linker_opts"; then linker_opts="-link$linker_opts" fi exec "$@" $linker_opts exit 1 } eat= case $1 in '') echo "$0: No command. Try '$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: compile [--help] [--version] PROGRAM [ARGS] Wrapper for compilers which do not understand '-c -o'. Remove '-o dest.o' from ARGS, run PROGRAM with the remaining arguments, and rename the output as expected. If you are trying to build a whole package this is not the right script to run: please start by reading the file 'INSTALL'. Report bugs to . EOF exit $? ;; -v | --v*) echo "compile $scriptversion" exit $? ;; cl | *[/\\]cl | cl.exe | *[/\\]cl.exe ) func_cl_wrapper "$@" # Doesn't return... 
;; esac ofile= cfile= for arg do if test -n "$eat"; then eat= else case $1 in -o) # configure might choose to run compile as 'compile cc -o foo foo.c'. # So we strip '-o arg' only if arg is an object. eat=1 case $2 in *.o | *.obj) ofile=$2 ;; *) set x "$@" -o "$2" shift ;; esac ;; *.c) cfile=$1 set x "$@" "$1" shift ;; *) set x "$@" "$1" shift ;; esac fi shift done if test -z "$ofile" || test -z "$cfile"; then # If no '-o' option was seen then we might have been invoked from a # pattern rule where we don't need one. That is ok -- this is a # normal compilation that the losing compiler can handle. If no # '.c' file was seen then we are probably linking. That is also # ok. exec "$@" fi # Name of file we expect compiler to create. cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` # Create the lock directory. # Note: use '[/\\:.-]' here to ensure that we don't use the same name # that we are using for the .o file. Also, base the name on the expected # object file name, since that is what matters with a parallel build. lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d while true; do if mkdir "$lockdir" >/dev/null 2>&1; then break fi sleep 1 done # FIXME: race condition here if user kills between mkdir and trap. trap "rmdir '$lockdir'; exit 1" 1 2 15 # Run the compile. "$@" ret=$? if test -f "$cofile"; then test "$cofile" = "$ofile" || mv "$cofile" "$ofile" elif test -f "${cofile}bj"; then test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" fi rmdir "$lockdir" exit $ret # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: libstoragemgmt-1.2.3/build-aux/depcomp0000755000175000017500000005601612540163524014737 00000000000000#! 
/bin/sh # depcomp - compile a program generating dependencies as side-effects scriptversion=2013-05-30.07; # UTC # Copyright (C) 1999-2013 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Originally written by Alexandre Oliva . case $1 in '') echo "$0: No command. Try '$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: depcomp [--help] [--version] PROGRAM [ARGS] Run PROGRAMS ARGS to compile a file, generating dependencies as side-effects. Environment variables: depmode Dependency tracking mode. source Source file read by 'PROGRAMS ARGS'. object Object file output by 'PROGRAMS ARGS'. DEPDIR directory where to store dependencies. depfile Dependency file to output. tmpdepfile Temporary file to use when outputting dependencies. libtool Whether libtool is used (yes/no). Report bugs to . EOF exit $? ;; -v | --v*) echo "depcomp $scriptversion" exit $? ;; esac # Get the directory component of the given path, and save it in the # global variables '$dir'. Note that this directory component will # be either empty or ending with a '/' character. This is deliberate. 
set_dir_from () { case $1 in */*) dir=`echo "$1" | sed -e 's|/[^/]*$|/|'`;; *) dir=;; esac } # Get the suffix-stripped basename of the given path, and save it the # global variable '$base'. set_base_from () { base=`echo "$1" | sed -e 's|^.*/||' -e 's/\.[^.]*$//'` } # If no dependency file was actually created by the compiler invocation, # we still have to create a dummy depfile, to avoid errors with the # Makefile "include basename.Plo" scheme. make_dummy_depfile () { echo "#dummy" > "$depfile" } # Factor out some common post-processing of the generated depfile. # Requires the auxiliary global variable '$tmpdepfile' to be set. aix_post_process_depfile () { # If the compiler actually managed to produce a dependency file, # post-process it. if test -f "$tmpdepfile"; then # Each line is of the form 'foo.o: dependency.h'. # Do two passes, one to just change these to # $object: dependency.h # and one to simply output # dependency.h: # which is needed to avoid the deleted-header problem. { sed -e "s,^.*\.[$lower]*:,$object:," < "$tmpdepfile" sed -e "s,^.*\.[$lower]*:[$tab ]*,," -e 's,$,:,' < "$tmpdepfile" } > "$depfile" rm -f "$tmpdepfile" else make_dummy_depfile fi } # A tabulation character. tab=' ' # A newline character. nl=' ' # Character ranges might be problematic outside the C locale. # These definitions help. upper=ABCDEFGHIJKLMNOPQRSTUVWXYZ lower=abcdefghijklmnopqrstuvwxyz digits=0123456789 alpha=${upper}${lower} if test -z "$depmode" || test -z "$source" || test -z "$object"; then echo "depcomp: Variables source, object and depmode must be set" 1>&2 exit 1 fi # Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. depfile=${depfile-`echo "$object" | sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} rm -f "$tmpdepfile" # Avoid interferences from the environment. gccflag= dashmflag= # Some modes work just like other modes, but use different flags. 
We # parameterize here, but still list the modes in the big case below, # to make depend.m4 easier to write. Note that we *cannot* use a case # here, because this file can only contain one case statement. if test "$depmode" = hp; then # HP compiler uses -M and no extra arg. gccflag=-M depmode=gcc fi if test "$depmode" = dashXmstdout; then # This is just like dashmstdout with a different argument. dashmflag=-xM depmode=dashmstdout fi cygpath_u="cygpath -u -f -" if test "$depmode" = msvcmsys; then # This is just like msvisualcpp but w/o cygpath translation. # Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u='sed s,\\\\,/,g' depmode=msvisualcpp fi if test "$depmode" = msvc7msys; then # This is just like msvc7 but w/o cygpath translation. # Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u='sed s,\\\\,/,g' depmode=msvc7 fi if test "$depmode" = xlc; then # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information. gccflag=-qmakedep=gcc,-MF depmode=gcc fi case "$depmode" in gcc3) ## gcc 3 implements dependency tracking that does exactly what ## we want. Yay! Note: for some reason libtool 1.4 doesn't like ## it if -MD -MP comes after the -MF stuff. Hmm. ## Unfortunately, FreeBSD c89 acceptance of flags depends upon ## the command line argument order; so add the flags where they ## appear in depend2.am. Note that the slowdown incurred here ## affects only configure: in makefiles, %FASTDEP% shortcuts this. for arg do case $arg in -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; *) set fnord "$@" "$arg" ;; esac shift # fnord shift # $arg done "$@" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi mv "$tmpdepfile" "$depfile" ;; gcc) ## Note that this doesn't just cater to obsosete pre-3.x GCC compilers. ## but also to in-use compilers like IMB xlc/xlC and the HP C compiler. 
## (see the conditional assignment to $gccflag above). ## There are various ways to get dependency output from gcc. Here's ## why we pick this rather obscure method: ## - Don't want to use -MD because we'd like the dependencies to end ## up in a subdir. Having to rename by hand is ugly. ## (We might end up doing this anyway to support other compilers.) ## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like ## -MM, not -M (despite what the docs say). Also, it might not be ## supported by the other compilers which use the 'gcc' depmode. ## - Using -M directly means running the compiler twice (even worse ## than renaming). if test -z "$gccflag"; then gccflag=-MD, fi "$@" -Wp,"$gccflag$tmpdepfile" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" # The second -e expression handles DOS-style file names with drive # letters. sed -e 's/^[^:]*: / /' \ -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" ## This next piece of magic avoids the "deleted header file" problem. ## The problem is that when a header file which appears in a .P file ## is deleted, the dependency causes make to die (because there is ## typically no way to rebuild the header). We avoid this by adding ## dummy dependencies for each header file. Too bad gcc doesn't do ## this for us directly. ## Some versions of gcc put a space before the ':'. On the theory ## that the space means something, we add a space to the output as ## well. hp depmode also adds that space, but also prefixes the VPATH ## to the object. Take care to not repeat it in the output. ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp) # This case exists only to let depend.m4 do its work. 
It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; sgi) if test "$libtool" = yes; then "$@" "-Wp,-MDupdate,$tmpdepfile" else "$@" -MDupdate "$tmpdepfile" fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files echo "$object : \\" > "$depfile" # Clip off the initial element (the dependent). Don't try to be # clever and replace this with sed code, as IRIX sed won't handle # lines with more than a fixed number of characters (4096 in # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; # the IRIX cc adds comments like '#:fec' to the end of the # dependency line. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' \ | tr "$nl" ' ' >> "$depfile" echo >> "$depfile" # The second pass generates a dummy entry for each header file. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ >> "$depfile" else make_dummy_depfile fi rm -f "$tmpdepfile" ;; xlc) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; aix) # The C for AIX Compiler uses -M and outputs the dependencies # in a .u file. In older versions, this file always lives in the # current directory. Also, the AIX compiler puts '$object:' at the # start of each line; $object doesn't have directory information. # Version 6 uses the directory in both cases. set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then tmpdepfile1=$dir$base.u tmpdepfile2=$base.u tmpdepfile3=$dir.libs/$base.u "$@" -Wc,-M else tmpdepfile1=$dir$base.u tmpdepfile2=$dir$base.u tmpdepfile3=$dir$base.u "$@" -M fi stat=$? 
if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" do test -f "$tmpdepfile" && break done aix_post_process_depfile ;; tcc) # tcc (Tiny C Compiler) understand '-MD -MF file' since version 0.9.26 # FIXME: That version still under development at the moment of writing. # Make that this statement remains true also for stable, released # versions. # It will wrap lines (doesn't matter whether long or short) with a # trailing '\', as in: # # foo.o : \ # foo.c \ # foo.h \ # # It will put a trailing '\' even on the last line, and will use leading # spaces rather than leading tabs (at least since its commit 0394caf7 # "Emit spaces for -MD"). "$@" -MD -MF "$tmpdepfile" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" # Each non-empty line is of the form 'foo.o : \' or ' dep.h \'. # We have to change lines of the first kind to '$object: \'. sed -e "s|.*:|$object :|" < "$tmpdepfile" > "$depfile" # And for each line of the second kind, we have to emit a 'dep.h:' # dummy dependency, to avoid the deleted-header problem. sed -n -e 's|^ *\(.*\) *\\$|\1:|p' < "$tmpdepfile" >> "$depfile" rm -f "$tmpdepfile" ;; ## The order of this option in the case statement is important, since the ## shell code in configure will try each of these formats in the order ## listed in this file. A plain '-MD' option would be understood by many ## compilers, so we must ensure this comes after the gcc and icc options. pgcc) # Portland's C compiler understands '-MD'. # Will always output deps to 'file.d' where file is the root name of the # source file under compilation, even if file resides in a subdirectory. # The object file name does not affect the name of the '.d' file. # pgcc 10.2 will output # foo.o: sub/foo.c sub/foo.h # and will wrap long lines using '\' : # foo.o: sub/foo.c ... \ # sub/foo.h ... \ # ... 
set_dir_from "$object" # Use the source, not the object, to determine the base name, since # that's sadly what pgcc will do too. set_base_from "$source" tmpdepfile=$base.d # For projects that build the same source file twice into different object # files, the pgcc approach of using the *source* file root name can cause # problems in parallel builds. Use a locking strategy to avoid stomping on # the same $tmpdepfile. lockdir=$base.d-lock trap " echo '$0: caught signal, cleaning up...' >&2 rmdir '$lockdir' exit 1 " 1 2 13 15 numtries=100 i=$numtries while test $i -gt 0; do # mkdir is a portable test-and-set. if mkdir "$lockdir" 2>/dev/null; then # This process acquired the lock. "$@" -MD stat=$? # Release the lock. rmdir "$lockdir" break else # If the lock is being held by a different process, wait # until the winning process is done or we timeout. while test -d "$lockdir" && test $i -gt 0; do sleep 1 i=`expr $i - 1` done fi i=`expr $i - 1` done trap - 1 2 13 15 if test $i -le 0; then echo "$0: failed to acquire lock after $numtries attempts" >&2 echo "$0: check lockdir '$lockdir'" >&2 exit 1 fi if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" # Each line is of the form `foo.o: dependent.h', # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. # Do two passes, one to just change these to # `$object: dependent.h' and one to simply `dependent.h:'. sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process this invocation # correctly. Breaking it into two sed invocations is a workaround. sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp2) # The "hp" stanza above does not work with aCC (C++) and HP's ia64 # compilers, which have integrated preprocessors. The correct option # to use with these is +Maked; it writes dependencies to a file named # 'foo.d', which lands next to the object file, wherever that # happens to be. 
# Much of this is similar to the tru64 case; see comments there. set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then tmpdepfile1=$dir$base.d tmpdepfile2=$dir.libs/$base.d "$@" -Wc,+Maked else tmpdepfile1=$dir$base.d tmpdepfile2=$dir$base.d "$@" +Maked fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then sed -e "s,^.*\.[$lower]*:,$object:," "$tmpdepfile" > "$depfile" # Add 'dependent.h:' lines. sed -ne '2,${ s/^ *// s/ \\*$// s/$/:/ p }' "$tmpdepfile" >> "$depfile" else make_dummy_depfile fi rm -f "$tmpdepfile" "$tmpdepfile2" ;; tru64) # The Tru64 compiler uses -MD to generate dependencies as a side # effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'. # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put # dependencies in 'foo.d' instead, so we check for that too. # Subdirectories are respected. set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then # Libtool generates 2 separate objects for the 2 libraries. These # two compilations output dependencies in $dir.libs/$base.o.d and # in $dir$base.o.d. We have to check for both files, because # one of the two compilations can be disabled. We should prefer # $dir$base.o.d over $dir.libs/$base.o.d because the latter is # automatically cleaned when .libs/ is deleted, while ignoring # the former would cause a distcleancheck panic. tmpdepfile1=$dir$base.o.d # libtool 1.5 tmpdepfile2=$dir.libs/$base.o.d # Likewise. tmpdepfile3=$dir.libs/$base.d # Compaq CCC V6.2-504 "$@" -Wc,-MD else tmpdepfile1=$dir$base.d tmpdepfile2=$dir$base.d tmpdepfile3=$dir$base.d "$@" -MD fi stat=$? 
if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" do test -f "$tmpdepfile" && break done # Same post-processing that is required for AIX mode. aix_post_process_depfile ;; msvc7) if test "$libtool" = yes; then showIncludes=-Wc,-showIncludes else showIncludes=-showIncludes fi "$@" $showIncludes > "$tmpdepfile" stat=$? grep -v '^Note: including file: ' "$tmpdepfile" if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" # The first sed program below extracts the file names and escapes # backslashes for cygpath. The second sed program outputs the file # name when reading, but also accumulates all include files in the # hold buffer in order to output them again at the end. This only # works with sed implementations that can handle large buffers. sed < "$tmpdepfile" -n ' /^Note: including file: *\(.*\)/ { s//\1/ s/\\/\\\\/g p }' | $cygpath_u | sort -u | sed -n ' s/ /\\ /g s/\(.*\)/'"$tab"'\1 \\/p s/.\(.*\) \\/\1:/ H $ { s/.*/'"$tab"'/ G p }' >> "$depfile" echo >> "$depfile" # make sure the fragment doesn't end with a backslash rm -f "$tmpdepfile" ;; msvc7msys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; #nosideeffect) # This comment above is used by automake to tell side-effect # dependency tracking mechanisms from slower ones. dashmstdout) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout, regardless of -o. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove '-o $object'. 
IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done test -z "$dashmflag" && dashmflag=-M # Require at least two characters before searching for ':' # in the target name. This is to cope with DOS-style filenames: # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise. "$@" $dashmflag | sed "s|^[$tab ]*[^:$tab ][^:][^:]*:[$tab ]*|$object: |" > "$tmpdepfile" rm -f "$depfile" cat < "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process this sed invocation # correctly. Breaking it into two sed invocations is a workaround. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; dashXmstdout) # This case only exists to satisfy depend.m4. It is never actually # run, as this mode is specially recognized in the preamble. exit 1 ;; makedepend) "$@" || exit $? # Remove any Libtool call if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # X makedepend shift cleared=no eat=no for arg do case $cleared in no) set ""; shift cleared=yes ;; esac if test $eat = yes; then eat=no continue fi case "$arg" in -D*|-I*) set fnord "$@" "$arg"; shift ;; # Strip any option that makedepend may not understand. Remove # the object too, otherwise makedepend will parse it as a source file. -arch) eat=yes ;; -*|$object) ;; *) set fnord "$@" "$arg"; shift ;; esac done obj_suffix=`echo "$object" | sed 's/^.*\././'` touch "$tmpdepfile" ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" rm -f "$depfile" # makedepend may prepend the VPATH from the source file name to the object. # No need to regex-escape $object, excess matching of '.' is harmless. sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process the last invocation # correctly. Breaking it into two sed invocations is a workaround. 
sed '1,2d' "$tmpdepfile" \ | tr ' ' "$nl" \ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" "$tmpdepfile".bak ;; cpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove '-o $object'. IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done "$@" -E \ | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ | sed '$ s: \\$::' > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" cat < "$tmpdepfile" >> "$depfile" sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; msvisualcpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi IFS=" " for arg do case "$arg" in -o) shift ;; $object) shift ;; "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") set fnord "$@" shift shift ;; *) set fnord "$@" "$arg" shift shift ;; esac done "$@" -E 2>/dev/null | sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile" echo "$tab" >> "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" rm -f "$tmpdepfile" ;; msvcmsys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. 
exit 1 ;; none) exec "$@" ;; *) echo "Unknown depmode $depmode" 1>&2 exit 1 ;; esac exit 0 # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: libstoragemgmt-1.2.3/build-aux/config.guess0000755000175000017500000013036112540163524015676 00000000000000#! /bin/sh # Attempt to guess a canonical system name. # Copyright 1992-2013 Free Software Foundation, Inc. timestamp='2013-06-10' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # # Originally written by Per Bothner. # # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD # # Please send patches with a ChangeLog entry to config-patches@gnu.org. me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. 
Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. Copyright 1992-2013 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" >&2 exit 1 ;; * ) break ;; esac done if test $# != 0; then echo "$me: too many arguments$help" >&2 exit 1 fi trap 'exit 1' 1 2 15 # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. # Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still # use `HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. 
set_cc_for_build=' trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; : ${TMPDIR=/tmp} ; { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; dummy=$tmp/dummy ; tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; case $CC_FOR_BUILD,$HOST_CC,$CC in ,,) echo "int x;" > $dummy.c ; for c in cc gcc c89 c99 ; do if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then CC_FOR_BUILD="$c"; break ; fi ; done ; if test x"$CC_FOR_BUILD" = x ; then CC_FOR_BUILD=no_compiler_found ; fi ;; ,,*) CC_FOR_BUILD=$CC ;; ,*,*) CC_FOR_BUILD=$HOST_CC ;; esac ; set_cc_for_build= ;' # This is needed to find uname on a Pyramid OSx when run in the BSD universe. # (ghazi@noc.rutgers.edu 1994-08-24) if (test -f /.attbin/uname) >/dev/null 2>&1 ; then PATH=$PATH:/.attbin ; export PATH fi UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown case "${UNAME_SYSTEM}" in Linux|GNU|GNU/*) # If the system lacks a compiler, then just pick glibc. # We could probably try harder. LIBC=gnu eval $set_cc_for_build cat <<-EOF > $dummy.c #include #if defined(__UCLIBC__) LIBC=uclibc #elif defined(__dietlibc__) LIBC=dietlibc #else LIBC=gnu #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` ;; esac # Note: order is significant - the case branches are not exclusive. 
case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently # switched to ELF, *-*-netbsd* would select the old # object file format. This provides both forward # compatibility and a consistent mechanism for selecting the # object file format. # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ /usr/sbin/$sysctl 2>/dev/null || echo unknown)` case "${UNAME_MACHINE_ARCH}" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; *) machine=${UNAME_MACHINE_ARCH}-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently, or will in the future. case "${UNAME_MACHINE_ARCH}" in arm*|i386|m68k|ns32k|sh3*|sparc|vax) eval $set_cc_for_build if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). # Return netbsd for either. FIX? os=netbsd else os=netbsdelf fi ;; *) os=netbsd ;; esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. case "${UNAME_VERSION}" in Debian*) release='-gnu' ;; *) release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
echo "${machine}-${os}${release}" exit ;; *:Bitrig:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} exit ;; *:ekkoBSD:*:*) echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} exit ;; *:SolidBSD:*:*) echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} exit ;; macppc:MirBSD:*:*) echo powerpc-unknown-mirbsd${UNAME_RELEASE} exit ;; *:MirBSD:*:*) echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` ;; *5.*) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` ;; esac # According to Compaq, /usr/sbin/psrinfo has been available on # OSF/1 and Tru64 systems produced since 1995. I hope that # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` case "$ALPHA_CPU_TYPE" in "EV4 (21064)") UNAME_MACHINE="alpha" ;; "EV4.5 (21064)") UNAME_MACHINE="alpha" ;; "LCA4 (21066/21068)") UNAME_MACHINE="alpha" ;; "EV5 (21164)") UNAME_MACHINE="alphaev5" ;; "EV5.6 (21164A)") UNAME_MACHINE="alphaev56" ;; "EV5.6 (21164PC)") UNAME_MACHINE="alphapca56" ;; "EV5.7 (21164PC)") UNAME_MACHINE="alphapca57" ;; "EV6 (21264)") UNAME_MACHINE="alphaev6" ;; "EV6.7 (21264A)") UNAME_MACHINE="alphaev67" ;; "EV6.8CB (21264C)") UNAME_MACHINE="alphaev68" ;; "EV6.8AL (21264B)") UNAME_MACHINE="alphaev68" ;; "EV6.8CX (21264D)") UNAME_MACHINE="alphaev68" ;; "EV6.9A (21264/EV69A)") UNAME_MACHINE="alphaev69" ;; "EV7 (21364)") UNAME_MACHINE="alphaev7" ;; "EV7.9 (21364A)") UNAME_MACHINE="alphaev79" ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. 
# A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` # Reset EXIT trap before exiting to avoid spurious non-zero exit code. exitcode=$? trap '' 0 exit $exitcode ;; Alpha\ *:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # Should we change UNAME_MACHINE based on the output of uname instead # of the specific Alpha model? echo alpha-pc-interix exit ;; 21064:Windows_NT:50:3) echo alpha-dec-winnt3.5 exit ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition exit ;; *:z/VM:*:*) echo s390-ibm-zvmoe exit ;; *:OS400:*:*) echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) echo arm-acorn-riscix${UNAME_RELEASE} exit ;; arm*:riscos:*:*|arm*:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) echo hppa1.1-hitachi-hiuxmpp exit ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. 
if test "`(/bin/universe) 2>/dev/null`" = att ; then echo pyramid-pyramid-sysv3 else echo pyramid-pyramid-bsd fi exit ;; NILE*:*:*:dcosx) echo pyramid-pyramid-svr4 exit ;; DRS?6000:unix:4.0:6*) echo sparc-icl-nx6 exit ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) case `/usr/bin/uname -p` in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4H:SunOS:5.*:*) echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) echo i386-pc-auroraux${UNAME_RELEASE} exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) eval $set_cc_for_build SUN_ARCH="i386" # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then SUN_ARCH="x86_64" fi fi echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:*:*) case "`/usr/bin/arch -k`" in Series*|S4*) UNAME_RELEASE=`uname -v` ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. 
echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` exit ;; sun3*:SunOS:*:*) echo m68k-sun-sunos${UNAME_RELEASE} exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) echo m68k-sun-sunos${UNAME_RELEASE} ;; sun4) echo sparc-sun-sunos${UNAME_RELEASE} ;; esac exit ;; aushp:SunOS:*:*) echo sparc-auspex-sunos${UNAME_RELEASE} exit ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not # "atarist" or "atariste" at least should have a processor # > m68000). The system name ranges from "MiNT" over "FreeMiNT" # to the lowercase version "mint" (or "freemint"). Finally # the system name "TOS" denotes a system which is actually not # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) echo m68k-milan-mint${UNAME_RELEASE} exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) echo m68k-hades-mint${UNAME_RELEASE} exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) echo m68k-unknown-mint${UNAME_RELEASE} exit ;; m68k:machten:*:*) echo m68k-apple-machten${UNAME_RELEASE} exit ;; powerpc:machten:*:*) echo powerpc-apple-machten${UNAME_RELEASE} exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) echo mips-dec-ultrix${UNAME_RELEASE} exit ;; VAX*:ULTRIX*:*:*) echo vax-dec-ultrix${UNAME_RELEASE} exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) echo clipper-intergraph-clix${UNAME_RELEASE} exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) eval $set_cc_for_build sed 's/^ //' << EOF 
>$dummy.c #ifdef __cplusplus #include /* for printf() prototype */ int main (int argc, char *argv[]) { #else int main (argc, argv) int argc; char *argv[]; { #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && SYSTEM_NAME=`$dummy $dummyarg` && { echo "$SYSTEM_NAME"; exit; } echo mips-mips-riscos${UNAME_RELEASE} exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax exit ;; Motorola:*:4.3:PL8-*) echo powerpc-harris-powermax exit ;; Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) echo powerpc-harris-powermax exit ;; Night_Hawk:Power_UNIX:*:*) echo powerpc-harris-powerunix exit ;; m88k:CX/UX:7*:*) echo m88k-harris-cxux7 exit ;; m88k:*:4*:R4*) echo m88k-motorola-sysv4 exit ;; m88k:*:3*:R3*) echo m88k-motorola-sysv3 exit ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] then if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ [ ${TARGET_BINARY_INTERFACE}x = x ] then echo m88k-dg-dgux${UNAME_RELEASE} else echo m88k-dg-dguxbcs${UNAME_RELEASE} fi else echo i586-dg-dgux${UNAME_RELEASE} fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) echo m88k-dolphin-sysv3 exit ;; M88*:*:R3*:*) # Delta 88k system running SVR3 echo m88k-motorola-sysv3 exit ;; XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) echo m88k-tektronix-sysv3 exit ;; Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` exit ;; 
????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' i*86:AIX:*:*) echo i386-ibm-aix exit ;; ia64:AIX:*:*) if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include main() { if (!__power_pc()) exit(1); puts("powerpc-ibm-aix3.2.5"); exit(0); } EOF if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` then echo "$SYSTEM_NAME" else echo rs6000-ibm-aix3.2.5 fi elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then echo rs6000-ibm-aix3.2.4 else echo rs6000-ibm-aix3.2 fi exit ;; *:AIX:*:[4567]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${IBM_ARCH}-ibm-aix${IBM_REV} exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; ibmrt:4.4BSD:*|romp-ibm:BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx exit ;; DPX/2?00:B.O.S.:*:*) echo m68k-bull-sysv3 exit ;; 9000/[34]??:4.3bsd:1.*:*) echo m68k-hp-bsd exit ;; hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) echo m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` case "${UNAME_MACHINE}" in 9000/31? ) HP_ARCH=m68000 ;; 9000/[34]?? 
) HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if [ -x /usr/bin/getconf ]; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` case "${sc_cpu_version}" in 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 case "${sc_kernel_bits}" in 32) HP_ARCH="hppa2.0n" ;; 64) HP_ARCH="hppa2.0w" ;; '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 esac ;; esac fi if [ "${HP_ARCH}" = "" ]; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #define _HPUX_SOURCE #include #include int main () { #if defined(_SC_KERNEL_BITS) long bits = sysconf(_SC_KERNEL_BITS); #endif long cpu = sysconf (_SC_CPU_VERSION); switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0"); break; case CPU_PA_RISC1_1: puts ("hppa1.1"); break; case CPU_PA_RISC2_0: #if defined(_SC_KERNEL_BITS) switch (bits) { case 64: puts ("hppa2.0w"); break; case 32: puts ("hppa2.0n"); break; default: puts ("hppa2.0"); break; } break; #else /* !defined(_SC_KERNEL_BITS) */ puts ("hppa2.0"); break; #endif default: puts ("hppa1.0"); break; } exit (0); } EOF (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac if [ ${HP_ARCH} = "hppa2.0w" ] then eval $set_cc_for_build # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler # generating 64-bit code. 
GNU and HP use different nomenclature: # # $ CC_FOR_BUILD=cc ./config.guess # => hppa2.0w-hp-hpux11.23 # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then HP_ARCH="hppa2.0w" else HP_ARCH="hppa64" fi fi echo ${HP_ARCH}-hp-hpux${HPUX_REV} exit ;; ia64:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` echo ia64-hp-hpux${HPUX_REV} exit ;; 3050*:HI-UX:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include int main () { long cpu = sysconf (_SC_CPU_VERSION); /* The order matters, because CPU_IS_HP_MC68K erroneously returns true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct results, however. */ if (CPU_IS_PA_RISC (cpu)) { switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; default: puts ("hppa-hitachi-hiuxwe2"); break; } } else if (CPU_IS_HP_MC68K (cpu)) puts ("m68k-hitachi-hiuxwe2"); else puts ("unknown-hitachi-hiuxwe2"); exit (0); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) echo hppa1.0-hp-bsd exit ;; *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) echo hppa1.0-hp-osf exit ;; i*86:OSF1:*:*) if [ -x /usr/sbin/sysversion ] ; then echo ${UNAME_MACHINE}-unknown-osf1mk else echo ${UNAME_MACHINE}-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) echo hppa1.1-hp-lites exit ;; C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) echo c1-convex-bsd exit ;; C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) echo 
c34-convex-bsd exit ;; C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) echo c38-convex-bsd exit ;; C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} exit ;; sparc*:BSD/OS:*:*) echo sparc-unknown-bsdi${UNAME_RELEASE} exit ;; *:BSD/OS:*:*) echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} exit ;; *:FreeBSD:*:*) UNAME_PROCESSOR=`/usr/bin/uname -p` case ${UNAME_PROCESSOR} in amd64) echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; *) echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; esac exit ;; 
i*:CYGWIN*:*) echo ${UNAME_MACHINE}-pc-cygwin exit ;; *:MINGW64*:*) echo ${UNAME_MACHINE}-pc-mingw64 exit ;; *:MINGW*:*) echo ${UNAME_MACHINE}-pc-mingw32 exit ;; i*:MSYS*:*) echo ${UNAME_MACHINE}-pc-msys exit ;; i*:windows32*:*) # uname -m includes "-pc" on this system. echo ${UNAME_MACHINE}-mingw32 exit ;; i*:PW*:*) echo ${UNAME_MACHINE}-pc-pw32 exit ;; *:Interix*:*) case ${UNAME_MACHINE} in x86) echo i586-pc-interix${UNAME_RELEASE} exit ;; authenticamd | genuineintel | EM64T) echo x86_64-unknown-interix${UNAME_RELEASE} exit ;; IA64) echo ia64-unknown-interix${UNAME_RELEASE} exit ;; esac ;; [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) echo i${UNAME_MACHINE}-pc-mks exit ;; 8664:Windows_NT:*) echo x86_64-pc-mks exit ;; i*:Windows_NT*:* | Pentium*:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we # UNAME_MACHINE based on the output of uname instead of i386? echo i586-pc-interix exit ;; i*:UWIN*:*) echo ${UNAME_MACHINE}-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-unknown-cygwin exit ;; p*:CYGWIN*:*) echo powerpcle-unknown-cygwin exit ;; prep*:SunOS:5.*:*) echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; *:GNU:*:*) # the GNU system echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} exit ;; i*86:Minix:*:*) echo ${UNAME_MACHINE}-pc-minix exit ;; aarch64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in EV5) UNAME_MACHINE=alphaev5 ;; EV56) 
UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; PCA57) UNAME_MACHINE=alphapca56 ;; EV6) UNAME_MACHINE=alphaev6 ;; EV67) UNAME_MACHINE=alphaev67 ;; EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 if test "$?" = 0 ; then LIBC="gnulibc1" ; fi echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; arc:Linux:*:* | arceb:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; arm*:Linux:*:*) eval $set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then echo ${UNAME_MACHINE}-unknown-linux-${LIBC} else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi else echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf fi fi exit ;; avr32*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; cris:Linux:*:*) echo ${UNAME_MACHINE}-axis-linux-${LIBC} exit ;; crisv32:Linux:*:*) echo ${UNAME_MACHINE}-axis-linux-${LIBC} exit ;; frv:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; hexagon:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; i*86:Linux:*:*) echo ${UNAME_MACHINE}-pc-linux-${LIBC} exit ;; ia64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; m32r*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; m68*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; mips:Linux:*:* | mips64:Linux:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #undef CPU #undef ${UNAME_MACHINE} #undef ${UNAME_MACHINE}el #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) CPU=${UNAME_MACHINE}el #else #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) CPU=${UNAME_MACHINE} #else CPU= #endif #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } ;; or1k:Linux:*:*) echo 
${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; or32:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; padre:Linux:*:*) echo sparc-unknown-linux-${LIBC} exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) echo hppa64-unknown-linux-${LIBC} exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; *) echo hppa-unknown-linux-${LIBC} ;; esac exit ;; ppc64:Linux:*:*) echo powerpc64-unknown-linux-${LIBC} exit ;; ppc:Linux:*:*) echo powerpc-unknown-linux-${LIBC} exit ;; ppc64le:Linux:*:*) echo powerpc64le-unknown-linux-${LIBC} exit ;; ppcle:Linux:*:*) echo powerpcle-unknown-linux-${LIBC} exit ;; s390:Linux:*:* | s390x:Linux:*:*) echo ${UNAME_MACHINE}-ibm-linux-${LIBC} exit ;; sh64*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; sh*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; tile*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; vax:Linux:*:*) echo ${UNAME_MACHINE}-dec-linux-${LIBC} exit ;; x86_64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; xtensa*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. # earlier versions are messed up and put the nodename in both # sysname and nodename. echo i386-sequent-sysv4 exit ;; i*86:UNIX_SV:4.2MP:2.*) # Unixware is an offshoot of SVR4, but it has its own version # number series starting with 2... # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. 
echo ${UNAME_MACHINE}-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) echo ${UNAME_MACHINE}-unknown-stop exit ;; i*86:atheos:*:*) echo ${UNAME_MACHINE}-unknown-atheos exit ;; i*86:syllable:*:*) echo ${UNAME_MACHINE}-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) echo i386-unknown-lynxos${UNAME_RELEASE} exit ;; i*86:*DOS:*:*) echo ${UNAME_MACHINE}-pc-msdosdjgpp exit ;; i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} else echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} fi exit ;; i*86:*:5:[678]*) # UnixWare 7.x, OpenUNIX and OpenServer 6. case `/bin/uname -X | grep "^Machine"` in *486*) UNAME_MACHINE=i486 ;; *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ && UNAME_MACHINE=i586 (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 echo ${UNAME_MACHINE}-pc-sco$UNAME_REL else echo ${UNAME_MACHINE}-pc-sysv32 fi exit ;; pc:*:*:*) # Left here for compatibility: # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub # prints for the "djgpp" host, or else GDB configury will decide that # this is a cross-build. 
echo i586-pc-msdosdjgpp exit ;; Intel:Mach:3*:*) echo i386-pc-mach3 exit ;; paragon:*:*:*) echo i860-intel-osf1 exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) # "miniframe" echo m68010-convergent-sysv exit ;; mc68k:UNIX:SYSTEM5:3.51m) echo m68k-convergent-sysv exit ;; M680?0:D-NIX:5.3:*) echo m68k-diab-dnix exit ;; M68*:*:R3V[5678]*:*) test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) OS_REL='' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; NCR*:*:4.2:* | MPRAS*:*:4.2:*) OS_REL='.3' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) echo m68k-unknown-lynxos${UNAME_RELEASE} exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) echo sparc-unknown-lynxos${UNAME_RELEASE} exit ;; rs6000:LynxOS:2.*:*) echo 
rs6000-unknown-lynxos${UNAME_RELEASE} exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) echo powerpc-unknown-lynxos${UNAME_RELEASE} exit ;; SM[BE]S:UNIX_SV:*:*) echo mips-dde-sysv${UNAME_RELEASE} exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 exit ;; RM*:SINIX-*:*:*) echo mips-sni-sysv4 exit ;; *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` echo ${UNAME_MACHINE}-sni-sysv4 else echo ns32k-sni-sysv fi exit ;; PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort # says echo i586-unisys-sysv4 exit ;; *:UNIX_System_V:4*:FTX*) # From Gerald Hewes . # How about differentiating between stratus architectures? -djm echo hppa1.1-stratus-sysv4 exit ;; *:*:*:FTX*) # From seanf@swdc.stratus.com. echo i860-stratus-sysv4 exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. echo ${UNAME_MACHINE}-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) echo m68k-apple-aux${UNAME_RELEASE} exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if [ -d /usr/nec ]; then echo mips-nec-sysv${UNAME_RELEASE} else echo mips-unknown-sysv${UNAME_RELEASE} fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. echo powerpc-be-beos exit ;; BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. echo powerpc-apple-beos exit ;; BePC:BeOS:*:*) # BeOS running on Intel PC compatible. echo i586-pc-beos exit ;; BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
echo i586-pc-haiku exit ;; x86_64:Haiku:*:*) echo x86_64-unknown-haiku exit ;; SX-4:SUPER-UX:*:*) echo sx4-nec-superux${UNAME_RELEASE} exit ;; SX-5:SUPER-UX:*:*) echo sx5-nec-superux${UNAME_RELEASE} exit ;; SX-6:SUPER-UX:*:*) echo sx6-nec-superux${UNAME_RELEASE} exit ;; SX-7:SUPER-UX:*:*) echo sx7-nec-superux${UNAME_RELEASE} exit ;; SX-8:SUPER-UX:*:*) echo sx8-nec-superux${UNAME_RELEASE} exit ;; SX-8R:SUPER-UX:*:*) echo sx8r-nec-superux${UNAME_RELEASE} exit ;; Power*:Rhapsody:*:*) echo powerpc-apple-rhapsody${UNAME_RELEASE} exit ;; *:Rhapsody:*:*) echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown eval $set_cc_for_build if test "$UNAME_PROCESSOR" = unknown ; then UNAME_PROCESSOR=powerpc fi if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then case $UNAME_PROCESSOR in i386) UNAME_PROCESSOR=x86_64 ;; powerpc) UNAME_PROCESSOR=powerpc64 ;; esac fi fi echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` if test "$UNAME_PROCESSOR" = "x86"; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; NEO-?:NONSTOP_KERNEL:*:*) echo neo-tandem-nsk${UNAME_RELEASE} exit ;; NSE-*:NONSTOP_KERNEL:*:*) echo nse-tandem-nsk${UNAME_RELEASE} exit ;; NSR-?:NONSTOP_KERNEL:*:*) echo nsr-tandem-nsk${UNAME_RELEASE} exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux exit ;; BS2000:POSIX*:*:*) echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. 
if test "$cputype" = "386"; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" fi echo ${UNAME_MACHINE}-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 exit ;; *:TENEX:*:*) echo pdp10-unknown-tenex exit ;; KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) echo pdp10-dec-tops20 exit ;; XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) echo pdp10-xkl-tops20 exit ;; *:TOPS-20:*:*) echo pdp10-unknown-tops20 exit ;; *:ITS:*:*) echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) echo mips-sei-seiux${UNAME_RELEASE} exit ;; *:DragonFly:*:*) echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` exit ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` case "${UNAME_MACHINE}" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; esac ;; *:XENIX:*:SysV) echo i386-pc-xenix exit ;; i*86:skyos:*:*) echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' exit ;; i*86:rdos:*:*) echo ${UNAME_MACHINE}-pc-rdos exit ;; i*86:AROS:*:*) echo ${UNAME_MACHINE}-pc-aros exit ;; x86_64:VMkernel:*:*) echo ${UNAME_MACHINE}-unknown-esx exit ;; esac eval $set_cc_for_build cat >$dummy.c < # include #endif main () { #if defined (sony) #if defined (MIPSEB) /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, I don't know.... 
*/ printf ("mips-sony-bsd\n"); exit (0); #else #include printf ("m68k-sony-newsos%s\n", #ifdef NEWSOS4 "4" #else "" #endif ); exit (0); #endif #endif #if defined (__arm) && defined (__acorn) && defined (__unix) printf ("arm-acorn-riscix\n"); exit (0); #endif #if defined (hp300) && !defined (hpux) printf ("m68k-hp-bsd\n"); exit (0); #endif #if defined (NeXT) #if !defined (__ARCHITECTURE__) #define __ARCHITECTURE__ "m68k" #endif int version; version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; if (version < 4) printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); else printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); exit (0); #endif #if defined (MULTIMAX) || defined (n16) #if defined (UMAXV) printf ("ns32k-encore-sysv\n"); exit (0); #else #if defined (CMU) printf ("ns32k-encore-mach\n"); exit (0); #else printf ("ns32k-encore-bsd\n"); exit (0); #endif #endif #endif #if defined (__386BSD__) printf ("i386-pc-bsd\n"); exit (0); #endif #if defined (sequent) #if defined (i386) printf ("i386-sequent-dynix\n"); exit (0); #endif #if defined (ns32000) printf ("ns32k-sequent-dynix\n"); exit (0); #endif #endif #if defined (_SEQUENT_) struct utsname un; uname(&un); if (strncmp(un.version, "V2", 2) == 0) { printf ("i386-sequent-ptx2\n"); exit (0); } if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? 
*/ printf ("i386-sequent-ptx1\n"); exit (0); } printf ("i386-sequent-ptx\n"); exit (0); #endif #if defined (vax) # if !defined (ultrix) # include # if defined (BSD) # if BSD == 43 printf ("vax-dec-bsd4.3\n"); exit (0); # else # if BSD == 199006 printf ("vax-dec-bsd4.3reno\n"); exit (0); # else printf ("vax-dec-bsd\n"); exit (0); # endif # endif # else printf ("vax-dec-bsd\n"); exit (0); # endif # else printf ("vax-dec-ultrix\n"); exit (0); # endif #endif #if defined (alliant) && defined (i860) printf ("i860-alliant-bsd\n"); exit (0); #endif exit (1); } EOF $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && { echo "$SYSTEM_NAME"; exit; } # Apollos put the system type in the environment. test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } # Convex versions that predate uname can use getsysinfo(1) if [ -x /usr/convex/getsysinfo ] then case `getsysinfo -f cpu_type` in c1*) echo c1-convex-bsd exit ;; c2*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; c34*) echo c34-convex-bsd exit ;; c38*) echo c38-convex-bsd exit ;; c4*) echo c4-convex-bsd exit ;; esac fi cat >&2 < in order to provide the needed information to handle your system. 
config.guess timestamp = $timestamp uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` /bin/uname -X = `(/bin/uname -X) 2>/dev/null` hostinfo = `(hostinfo) 2>/dev/null` /bin/universe = `(/bin/universe) 2>/dev/null` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` /bin/arch = `(/bin/arch) 2>/dev/null` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` UNAME_MACHINE = ${UNAME_MACHINE} UNAME_RELEASE = ${UNAME_RELEASE} UNAME_SYSTEM = ${UNAME_SYSTEM} UNAME_VERSION = ${UNAME_VERSION} EOF exit 1 # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: libstoragemgmt-1.2.3/build-aux/test-driver0000755000175000017500000000761112540163524015555 00000000000000#! /bin/sh # test-driver - basic testsuite driver script. scriptversion=2012-06-27.10; # UTC # Copyright (C) 2011-2013 Free Software Foundation, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # This file is maintained in Automake, please report # bugs to or send patches to # . # Make unconditional expansion of undefined variables an error. This # helps a lot in preventing typo-related bugs. set -u usage_error () { echo "$0: $*" >&2 print_usage >&2 exit 2 } print_usage () { cat <$log_file 2>&1 estatus=$? if test $enable_hard_errors = no && test $estatus -eq 99; then estatus=1 fi case $estatus:$expect_failure in 0:yes) col=$red res=XPASS recheck=yes gcopy=yes;; 0:*) col=$grn res=PASS recheck=no gcopy=no;; 77:*) col=$blu res=SKIP recheck=no gcopy=yes;; 99:*) col=$mgn res=ERROR recheck=yes gcopy=yes;; *:yes) col=$lgn res=XFAIL recheck=no gcopy=yes;; *:*) col=$red res=FAIL recheck=yes gcopy=yes;; esac # Report outcome to console. echo "${col}${res}${std}: $test_name" # Register the test result, and other relevant metadata. echo ":test-result: $res" > $trs_file echo ":global-test-result: $res" >> $trs_file echo ":recheck: $recheck" >> $trs_file echo ":copy-in-global-log: $gcopy" >> $trs_file # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: libstoragemgmt-1.2.3/m4/0000775000175000017500000000000012542455463012072 500000000000000libstoragemgmt-1.2.3/m4/lt~obsolete.m40000644000175000017500000001375612540163517014632 00000000000000# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- # # Copyright (C) 2004, 2005, 2007, 2009 Free Software Foundation, Inc. # Written by Scott James Remnant, 2004. 
# # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 5 lt~obsolete.m4 # These exist entirely to fool aclocal when bootstrapping libtool. # # In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN) # which have later been changed to m4_define as they aren't part of the # exported API, or moved to Autoconf or Automake where they belong. # # The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN # in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us # using a macro with the same name in our local m4/libtool.m4 it'll # pull the old libtool.m4 in (it doesn't see our shiny new m4_define # and doesn't know about Autoconf macros at all.) # # So we provide this file, which has a silly filename so it's always # included after everything else. This provides aclocal with the # AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything # because those macros already exist, or will be overwritten later. # We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. # # Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. # Yes, that means every name once taken will need to remain here until # we give up compatibility with versions before 1.7, at which point # we need to keep only those names which we still refer to. # This is to help aclocal find these macros, as it can't see m4_define. 
AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], 
[AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], 
[AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])]) m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])]) m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])]) m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])]) m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])]) m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])]) m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])]) libstoragemgmt-1.2.3/m4/ax_python_module.m40000664000175000017500000000232712537546123015634 00000000000000# =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_python_module.html # =========================================================================== # # SYNOPSIS # # AX_PYTHON_MODULE(modname[, fatal]) # # DESCRIPTION # # Checks for Python module. # # If fatal is non-empty then absence of a module will trigger an error. # # LICENSE # # Copyright (c) 2008 Andrew Collier # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. This file is offered as-is, without any # warranty. #serial 5 AU_ALIAS([AC_PYTHON_MODULE], [AX_PYTHON_MODULE]) AC_DEFUN([AX_PYTHON_MODULE],[ if test -z $PYTHON; then PYTHON="python" fi PYTHON_NAME=`basename $PYTHON` AC_MSG_CHECKING($PYTHON_NAME module: $1) $PYTHON -c "import $1" 2>/dev/null if test $? -eq 0; then AC_MSG_RESULT(yes) eval AS_TR_CPP(HAVE_PYMOD_$1)=yes else AC_MSG_RESULT(no) eval AS_TR_CPP(HAVE_PYMOD_$1)=no # if test -n "$2" then AC_MSG_ERROR(failed to find required module $1) exit 1 fi fi ]) libstoragemgmt-1.2.3/m4/ltsugar.m40000644000175000017500000001042412540163517013726 00000000000000# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- # # Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. # Written by Gary V. 
Vaughan, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 6 ltsugar.m4 # This is to help aclocal find these macros, as it can't see m4_define. AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) # lt_join(SEP, ARG1, [ARG2...]) # ----------------------------- # Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their # associated separator. # Needed until we can rely on m4_join from Autoconf 2.62, since all earlier # versions in m4sugar had bugs. m4_define([lt_join], [m4_if([$#], [1], [], [$#], [2], [[$2]], [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) m4_define([_lt_join], [m4_if([$#$2], [2], [], [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) # lt_car(LIST) # lt_cdr(LIST) # ------------ # Manipulate m4 lists. # These macros are necessary as long as will still need to support # Autoconf-2.59 which quotes differently. m4_define([lt_car], [[$1]]) m4_define([lt_cdr], [m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], [$#], 1, [], [m4_dquote(m4_shift($@))])]) m4_define([lt_unquote], $1) # lt_append(MACRO-NAME, STRING, [SEPARATOR]) # ------------------------------------------ # Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'. # Note that neither SEPARATOR nor STRING are expanded; they are appended # to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). # No SEPARATOR is output if MACRO-NAME was previously undefined (different # than defined and empty). # # This macro is needed until we can rely on Autoconf 2.62, since earlier # versions of m4sugar mistakenly expanded SEPARATOR but not STRING. 
m4_define([lt_append], [m4_define([$1], m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) # lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) # ---------------------------------------------------------- # Produce a SEP delimited list of all paired combinations of elements of # PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list # has the form PREFIXmINFIXSUFFIXn. # Needed until we can rely on m4_combine added in Autoconf 2.62. m4_define([lt_combine], [m4_if(m4_eval([$# > 3]), [1], [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl [[m4_foreach([_Lt_prefix], [$2], [m4_foreach([_Lt_suffix], ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) # lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) # ----------------------------------------------------------------------- # Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited # by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. 
m4_define([lt_if_append_uniq], [m4_ifdef([$1], [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], [lt_append([$1], [$2], [$3])$4], [$5])], [lt_append([$1], [$2], [$3])$4])]) # lt_dict_add(DICT, KEY, VALUE) # ----------------------------- m4_define([lt_dict_add], [m4_define([$1($2)], [$3])]) # lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) # -------------------------------------------- m4_define([lt_dict_add_subkey], [m4_define([$1($2:$3)], [$4])]) # lt_dict_fetch(DICT, KEY, [SUBKEY]) # ---------------------------------- m4_define([lt_dict_fetch], [m4_ifval([$3], m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) # lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) # ----------------------------------------------------------------- m4_define([lt_if_dict_fetch], [m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], [$5], [$6])]) # lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) # -------------------------------------------------------------- m4_define([lt_dict_filter], [m4_if([$5], [], [], [lt_join(m4_quote(m4_default([$4], [[, ]])), lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl ]) libstoragemgmt-1.2.3/m4/ltoptions.m40000644000175000017500000003007312540163517014302 00000000000000# Helper functions for option handling. -*- Autoconf -*- # # Copyright (C) 2004, 2005, 2007, 2008, 2009 Free Software Foundation, # Inc. # Written by Gary V. Vaughan, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # serial 7 ltoptions.m4 # This is to help aclocal find these macros, as it can't see m4_define. 
AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) # _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) # ------------------------------------------ m4_define([_LT_MANGLE_OPTION], [[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) # _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) # --------------------------------------- # Set option OPTION-NAME for macro MACRO-NAME, and if there is a # matching handler defined, dispatch to it. Other OPTION-NAMEs are # saved as a flag. m4_define([_LT_SET_OPTION], [m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), _LT_MANGLE_DEFUN([$1], [$2]), [m4_warning([Unknown $1 option `$2'])])[]dnl ]) # _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) # ------------------------------------------------------------ # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. m4_define([_LT_IF_OPTION], [m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) # _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) # ------------------------------------------------------- # Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME # are set. m4_define([_LT_UNLESS_OPTIONS], [m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), [m4_define([$0_found])])])[]dnl m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 ])[]dnl ]) # _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) # ---------------------------------------- # OPTION-LIST is a space-separated list of Libtool options associated # with MACRO-NAME. If any OPTION has a matching handler declared with # LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about # the unknown option and exit. 
m4_defun([_LT_SET_OPTIONS], [# Set options m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), [_LT_SET_OPTION([$1], _LT_Option)]) m4_if([$1],[LT_INIT],[ dnl dnl Simply set some default values (i.e off) if boolean options were not dnl specified: _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no ]) _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no ]) dnl dnl If no reference was made to various pairs of opposing options, then dnl we run the default mode handler for the pair. For example, if neither dnl `shared' nor `disable-shared' was passed, we enable building of shared dnl archives by default: _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], [_LT_ENABLE_FAST_INSTALL]) ]) ])# _LT_SET_OPTIONS ## --------------------------------- ## ## Macros to handle LT_INIT options. ## ## --------------------------------- ## # _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) # ----------------------------------------- m4_define([_LT_MANGLE_DEFUN], [[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) # LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) # ----------------------------------------------- m4_define([LT_OPTION_DEFINE], [m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl ])# LT_OPTION_DEFINE # dlopen # ------ LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes ]) AU_DEFUN([AC_LIBTOOL_DLOPEN], [_LT_SET_OPTION([LT_INIT], [dlopen]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `dlopen' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) # win32-dll # --------- # Declare package support for building win32 dll's. 
LT_OPTION_DEFINE([LT_INIT], [win32-dll], [enable_win32_dll=yes case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) AC_CHECK_TOOL(AS, as, false) AC_CHECK_TOOL(DLLTOOL, dlltool, false) AC_CHECK_TOOL(OBJDUMP, objdump, false) ;; esac test -z "$AS" && AS=as _LT_DECL([], [AS], [1], [Assembler program])dnl test -z "$DLLTOOL" && DLLTOOL=dlltool _LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl test -z "$OBJDUMP" && OBJDUMP=objdump _LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl ])# win32-dll AU_DEFUN([AC_LIBTOOL_WIN32_DLL], [AC_REQUIRE([AC_CANONICAL_HOST])dnl _LT_SET_OPTION([LT_INIT], [win32-dll]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `win32-dll' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) # _LT_ENABLE_SHARED([DEFAULT]) # ---------------------------- # implement the --enable-shared flag, and supports the `shared' and # `disable-shared' LT_INIT options. # DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. m4_define([_LT_ENABLE_SHARED], [m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([shared], [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_shared=yes ;; no) enable_shared=no ;; *) enable_shared=no # Look at the argument we got. We use all the common list separators. 
lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_shared=yes fi done IFS="$lt_save_ifs" ;; esac], [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) _LT_DECL([build_libtool_libs], [enable_shared], [0], [Whether or not to build shared libraries]) ])# _LT_ENABLE_SHARED LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) # Old names: AC_DEFUN([AC_ENABLE_SHARED], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) ]) AC_DEFUN([AC_DISABLE_SHARED], [_LT_SET_OPTION([LT_INIT], [disable-shared]) ]) AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_ENABLE_SHARED], []) dnl AC_DEFUN([AM_DISABLE_SHARED], []) # _LT_ENABLE_STATIC([DEFAULT]) # ---------------------------- # implement the --enable-static flag, and support the `static' and # `disable-static' LT_INIT options. # DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. m4_define([_LT_ENABLE_STATIC], [m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([static], [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. 
lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_static=yes fi done IFS="$lt_save_ifs" ;; esac], [enable_static=]_LT_ENABLE_STATIC_DEFAULT) _LT_DECL([build_old_libs], [enable_static], [0], [Whether or not to build static libraries]) ])# _LT_ENABLE_STATIC LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) # Old names: AC_DEFUN([AC_ENABLE_STATIC], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) ]) AC_DEFUN([AC_DISABLE_STATIC], [_LT_SET_OPTION([LT_INIT], [disable-static]) ]) AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AM_ENABLE_STATIC], []) dnl AC_DEFUN([AM_DISABLE_STATIC], []) # _LT_ENABLE_FAST_INSTALL([DEFAULT]) # ---------------------------------- # implement the --enable-fast-install flag, and support the `fast-install' # and `disable-fast-install' LT_INIT options. # DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. m4_define([_LT_ENABLE_FAST_INSTALL], [m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl AC_ARG_ENABLE([fast-install], [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], [p=${PACKAGE-default} case $enableval in yes) enable_fast_install=yes ;; no) enable_fast_install=no ;; *) enable_fast_install=no # Look at the argument we got. We use all the common list separators. 
lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for pkg in $enableval; do IFS="$lt_save_ifs" if test "X$pkg" = "X$p"; then enable_fast_install=yes fi done IFS="$lt_save_ifs" ;; esac], [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) _LT_DECL([fast_install], [enable_fast_install], [0], [Whether or not to optimize for fast installation])dnl ])# _LT_ENABLE_FAST_INSTALL LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) # Old names: AU_DEFUN([AC_ENABLE_FAST_INSTALL], [_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `fast-install' option into LT_INIT's first parameter.]) ]) AU_DEFUN([AC_DISABLE_FAST_INSTALL], [_LT_SET_OPTION([LT_INIT], [disable-fast-install]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `disable-fast-install' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) # _LT_WITH_PIC([MODE]) # -------------------- # implement the --with-pic flag, and support the `pic-only' and `no-pic' # LT_INIT options. # MODE is either `yes' or `no'. If omitted, it defaults to `both'. m4_define([_LT_WITH_PIC], [AC_ARG_WITH([pic], [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@], [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], [lt_p=${PACKAGE-default} case $withval in yes|no) pic_mode=$withval ;; *) pic_mode=default # Look at the argument we got. We use all the common list separators. 
lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," for lt_pkg in $withval; do IFS="$lt_save_ifs" if test "X$lt_pkg" = "X$lt_p"; then pic_mode=yes fi done IFS="$lt_save_ifs" ;; esac], [pic_mode=default]) test -z "$pic_mode" && pic_mode=m4_default([$1], [default]) _LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl ])# _LT_WITH_PIC LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) # Old name: AU_DEFUN([AC_LIBTOOL_PICMODE], [_LT_SET_OPTION([LT_INIT], [pic-only]) AC_DIAGNOSE([obsolete], [$0: Remove this warning and the call to _LT_SET_OPTION when you put the `pic-only' option into LT_INIT's first parameter.]) ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) ## ----------------- ## ## LTDL_INIT Options ## ## ----------------- ## m4_define([_LTDL_MODE], []) LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], [m4_define([_LTDL_MODE], [nonrecursive])]) LT_OPTION_DEFINE([LTDL_INIT], [recursive], [m4_define([_LTDL_MODE], [recursive])]) LT_OPTION_DEFINE([LTDL_INIT], [subproject], [m4_define([_LTDL_MODE], [subproject])]) m4_define([_LTDL_TYPE], []) LT_OPTION_DEFINE([LTDL_INIT], [installable], [m4_define([_LTDL_TYPE], [installable])]) LT_OPTION_DEFINE([LTDL_INIT], [convenience], [m4_define([_LTDL_TYPE], [convenience])]) libstoragemgmt-1.2.3/m4/libtool.m40000644000175000017500000105743212540163517013724 00000000000000# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- # # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, # 2006, 2007, 2008, 2009, 2010, 2011 Free Software # Foundation, Inc. # Written by Gordon Matzigkeit, 1996 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. 
m4_define([_LT_COPYING], [dnl # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, # 2006, 2007, 2008, 2009, 2010, 2011 Free Software # Foundation, Inc. # Written by Gordon Matzigkeit, 1996 # # This file is part of GNU Libtool. # # GNU Libtool is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. # # GNU Libtool is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Libtool; see the file COPYING. If not, a copy # can be downloaded from http://www.gnu.org/licenses/gpl.html, or # obtained by writing to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. ]) # serial 57 LT_INIT # LT_PREREQ(VERSION) # ------------------ # Complain and exit if this libtool version is less that VERSION. 
m4_defun([LT_PREREQ], [m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, [m4_default([$3], [m4_fatal([Libtool version $1 or higher is required], 63)])], [$2])]) # _LT_CHECK_BUILDDIR # ------------------ # Complain if the absolute build directory name contains unusual characters m4_defun([_LT_CHECK_BUILDDIR], [case `pwd` in *\ * | *\ *) AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; esac ]) # LT_INIT([OPTIONS]) # ------------------ AC_DEFUN([LT_INIT], [AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl AC_BEFORE([$0], [LT_LANG])dnl AC_BEFORE([$0], [LT_OUTPUT])dnl AC_BEFORE([$0], [LTDL_INIT])dnl m4_require([_LT_CHECK_BUILDDIR])dnl dnl Autoconf doesn't catch unexpanded LT_ macros by default: m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 dnl unless we require an AC_DEFUNed macro: AC_REQUIRE([LTOPTIONS_VERSION])dnl AC_REQUIRE([LTSUGAR_VERSION])dnl AC_REQUIRE([LTVERSION_VERSION])dnl AC_REQUIRE([LTOBSOLETE_VERSION])dnl m4_require([_LT_PROG_LTMAIN])dnl _LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}]) dnl Parse OPTIONS _LT_SET_OPTIONS([$0], [$1]) # This can be used to rebuild libtool when needed LIBTOOL_DEPS="$ltmain" # Always use our own libtool. LIBTOOL='$(SHELL) $(top_builddir)/libtool' AC_SUBST(LIBTOOL)dnl _LT_SETUP # Only expand once: m4_define([LT_INIT]) ])# LT_INIT # Old names: AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_PROG_LIBTOOL], []) dnl AC_DEFUN([AM_PROG_LIBTOOL], []) # _LT_CC_BASENAME(CC) # ------------------- # Calculate cc_basename. Skip known compiler wrappers and cross-prefix. 
m4_defun([_LT_CC_BASENAME], [for cc_temp in $1""; do case $cc_temp in compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; \-*) ;; *) break;; esac done cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` ]) # _LT_FILEUTILS_DEFAULTS # ---------------------- # It is okay to use these file commands and assume they have been set # sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'. m4_defun([_LT_FILEUTILS_DEFAULTS], [: ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} ])# _LT_FILEUTILS_DEFAULTS # _LT_SETUP # --------- m4_defun([_LT_SETUP], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl _LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl dnl _LT_DECL([], [host_alias], [0], [The host system])dnl _LT_DECL([], [host], [0])dnl _LT_DECL([], [host_os], [0])dnl dnl _LT_DECL([], [build_alias], [0], [The build system])dnl _LT_DECL([], [build], [0])dnl _LT_DECL([], [build_os], [0])dnl dnl AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([LT_PATH_LD])dnl AC_REQUIRE([LT_PATH_NM])dnl dnl AC_REQUIRE([AC_PROG_LN_S])dnl test -z "$LN_S" && LN_S="ln -s" _LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl dnl AC_REQUIRE([LT_CMD_MAX_LEN])dnl _LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl _LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_CHECK_SHELL_FEATURES])dnl m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl m4_require([_LT_CMD_RELOAD])dnl m4_require([_LT_CHECK_MAGIC_METHOD])dnl m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl m4_require([_LT_CMD_OLD_ARCHIVE])dnl m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl m4_require([_LT_WITH_SYSROOT])dnl _LT_CONFIG_LIBTOOL_INIT([ # See if we are running on zsh, and set the options which allow our # commands through without removal of 
\ escapes INIT. if test -n "\${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi ]) if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi _LT_CHECK_OBJDIR m4_require([_LT_TAG_COMPILER])dnl case $host_os in aix3*) # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi ;; esac # Global variables: ofile=libtool can_build_shared=yes # All known linkers require a `.a' archive for static linking (except MSVC, # which needs '.lib'). libext=a with_gnu_ld="$lt_cv_prog_gnu_ld" old_CC="$CC" old_CFLAGS="$CFLAGS" # Set sane defaults for various variables test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS test -z "$LD" && LD=ld test -z "$ac_objext" && ac_objext=o _LT_CC_BASENAME([$compiler]) # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then _LT_PATH_MAGIC fi ;; esac # Use C for the default configuration in the libtool script LT_SUPPORTED_TAG([CC]) _LT_LANG_C_CONFIG _LT_LANG_DEFAULT_CONFIG _LT_CONFIG_COMMANDS ])# _LT_SETUP # _LT_PREPARE_SED_QUOTE_VARS # -------------------------- # Define a few sed substitution that help us do robust quoting. m4_defun([_LT_PREPARE_SED_QUOTE_VARS], [# Backslashify metacharacters that are still active within # double-quoted strings. sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\([["`\\]]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' # Sed substitution to delay expansion of an escaped single quote. 
delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' ]) # _LT_PROG_LTMAIN # --------------- # Note that this code is called both from `configure', and `config.status' # now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, # `config.status' has no value for ac_aux_dir unless we are using Automake, # so we pass a copy along to make sure it has a sensible value anyway. m4_defun([_LT_PROG_LTMAIN], [m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl _LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) ltmain="$ac_aux_dir/ltmain.sh" ])# _LT_PROG_LTMAIN ## ------------------------------------- ## ## Accumulate code for creating libtool. ## ## ------------------------------------- ## # So that we can recreate a full libtool script including additional # tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS # in macros and then make a single call at the end using the `libtool' # label. # _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) # ---------------------------------------- # Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. m4_define([_LT_CONFIG_LIBTOOL_INIT], [m4_ifval([$1], [m4_append([_LT_OUTPUT_LIBTOOL_INIT], [$1 ])])]) # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_INIT]) # _LT_CONFIG_LIBTOOL([COMMANDS]) # ------------------------------ # Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. m4_define([_LT_CONFIG_LIBTOOL], [m4_ifval([$1], [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], [$1 ])])]) # Initialize. 
m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) # _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) # ----------------------------------------------------- m4_defun([_LT_CONFIG_SAVE_COMMANDS], [_LT_CONFIG_LIBTOOL([$1]) _LT_CONFIG_LIBTOOL_INIT([$2]) ]) # _LT_FORMAT_COMMENT([COMMENT]) # ----------------------------- # Add leading comment marks to the start of each line, and a trailing # full-stop to the whole comment if one is not present already. m4_define([_LT_FORMAT_COMMENT], [m4_ifval([$1], [ m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) )]) ## ------------------------ ## ## FIXME: Eliminate VARNAME ## ## ------------------------ ## # _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) # ------------------------------------------------------------------- # CONFIGNAME is the name given to the value in the libtool script. # VARNAME is the (base) name used in the configure script. # VALUE may be 0, 1 or 2 for a computed quote escaped value based on # VARNAME. Any other value will be used directly. 
m4_define([_LT_DECL], [lt_if_append_uniq([lt_decl_varnames], [$2], [, ], [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], [m4_ifval([$1], [$1], [$2])]) lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) m4_ifval([$4], [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) lt_dict_add_subkey([lt_decl_dict], [$2], [tagged?], [m4_ifval([$5], [yes], [no])])]) ]) # _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) # -------------------------------------------------------- m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) # lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) # ------------------------------------------------ m4_define([lt_decl_tag_varnames], [_lt_decl_filter([tagged?], [yes], $@)]) # _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) # --------------------------------------------------------- m4_define([_lt_decl_filter], [m4_case([$#], [0], [m4_fatal([$0: too few arguments: $#])], [1], [m4_fatal([$0: too few arguments: $#: $1])], [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], [lt_dict_filter([lt_decl_dict], $@)])[]dnl ]) # lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) # -------------------------------------------------- m4_define([lt_decl_quote_varnames], [_lt_decl_filter([value], [1], $@)]) # lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) # --------------------------------------------------- m4_define([lt_decl_dquote_varnames], [_lt_decl_filter([value], [2], $@)]) # lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) # --------------------------------------------------- m4_define([lt_decl_varnames_tagged], [m4_assert([$# <= 2])dnl _$0(m4_quote(m4_default([$1], [[, ]])), m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) m4_define([_lt_decl_varnames_tagged], [m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) # 
lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) # ------------------------------------------------ m4_define([lt_decl_all_varnames], [_$0(m4_quote(m4_default([$1], [[, ]])), m4_if([$2], [], m4_quote(lt_decl_varnames), m4_quote(m4_shift($@))))[]dnl ]) m4_define([_lt_decl_all_varnames], [lt_join($@, lt_decl_varnames_tagged([$1], lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl ]) # _LT_CONFIG_STATUS_DECLARE([VARNAME]) # ------------------------------------ # Quote a variable value, and forward it to `config.status' so that its # declaration there will have the same value as in `configure'. VARNAME # must have a single quote delimited value for this to work. m4_define([_LT_CONFIG_STATUS_DECLARE], [$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`']) # _LT_CONFIG_STATUS_DECLARATIONS # ------------------------------ # We delimit libtool config variables with single quotes, so when # we write them to config.status, we have to be sure to quote all # embedded single quotes properly. In configure, this macro expands # each variable declared with _LT_DECL (and _LT_TAGDECL) into: # # ='`$ECHO "$" | $SED "$delay_single_quote_subst"`' m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], [m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) # _LT_LIBTOOL_TAGS # ---------------- # Output comment and list of tags supported by the script m4_defun([_LT_LIBTOOL_TAGS], [_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl available_tags="_LT_TAGS"dnl ]) # _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) # ----------------------------------- # Extract the dictionary values for VARNAME (optionally with TAG) and # expand to a commented shell variable setting: # # # Some comment about what VAR is for. 
# visible_name=$lt_internal_name m4_define([_LT_LIBTOOL_DECLARE], [_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [description])))[]dnl m4_pushdef([_libtool_name], m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), [0], [_libtool_name=[$]$1], [1], [_libtool_name=$lt_[]$1], [2], [_libtool_name=$lt_[]$1], [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl ]) # _LT_LIBTOOL_CONFIG_VARS # ----------------------- # Produce commented declarations of non-tagged libtool config variables # suitable for insertion in the LIBTOOL CONFIG section of the `libtool' # script. Tagged libtool config variables (even for the LIBTOOL CONFIG # section) are produced by _LT_LIBTOOL_TAG_VARS. m4_defun([_LT_LIBTOOL_CONFIG_VARS], [m4_foreach([_lt_var], m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) # _LT_LIBTOOL_TAG_VARS(TAG) # ------------------------- m4_define([_LT_LIBTOOL_TAG_VARS], [m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) # _LT_TAGVAR(VARNAME, [TAGNAME]) # ------------------------------ m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) # _LT_CONFIG_COMMANDS # ------------------- # Send accumulated output to $CONFIG_STATUS. Thanks to the lists of # variables for single and double quote escaping we saved from calls # to _LT_DECL, we can put quote escaped variables declarations # into `config.status', and then the shell code to quote escape them in # for loops in `config.status'. Finally, any additional code accumulated # from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. 
m4_defun([_LT_CONFIG_COMMANDS], [AC_PROVIDE_IFELSE([LT_OUTPUT], dnl If the libtool generation code has been placed in $CONFIG_LT, dnl instead of duplicating it all over again into config.status, dnl then we will have config.status run $CONFIG_LT later, so it dnl needs to know what name is stored there: [AC_CONFIG_COMMANDS([libtool], [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], dnl If the libtool generation code is destined for config.status, dnl expand the accumulated commands and init code now: [AC_CONFIG_COMMANDS([libtool], [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) ])#_LT_CONFIG_COMMANDS # Initialize. m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], [ # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH sed_quote_subst='$sed_quote_subst' double_quote_subst='$double_quote_subst' delay_variable_subst='$delay_variable_subst' _LT_CONFIG_STATUS_DECLARATIONS LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' compiler='$compiler_DEFAULT' # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF \$[]1 _LTECHO_EOF' } # Quote evaled strings. for var in lt_decl_all_varnames([[ \ ]], lt_decl_quote_varnames); do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[[\\\\\\\`\\"\\\$]]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Double-quote double-evaled strings. 
for var in lt_decl_all_varnames([[ \ ]], lt_decl_dquote_varnames); do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[[\\\\\\\`\\"\\\$]]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done _LT_OUTPUT_LIBTOOL_INIT ]) # _LT_GENERATED_FILE_INIT(FILE, [COMMENT]) # ------------------------------------ # Generate a child script FILE with all initialization necessary to # reuse the environment learned by the parent script, and make the # file executable. If COMMENT is supplied, it is inserted after the # `#!' sequence but before initialization text begins. After this # macro, additional text can be appended to FILE to form the body of # the child script. The macro ends with non-zero status if the # file could not be fully written (such as if the disk is full). m4_ifdef([AS_INIT_GENERATED], [m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])], [m4_defun([_LT_GENERATED_FILE_INIT], [m4_require([AS_PREPARE])]dnl [m4_pushdef([AS_MESSAGE_LOG_FD])]dnl [lt_write_fail=0 cat >$1 <<_ASEOF || lt_write_fail=1 #! $SHELL # Generated by $as_me. $2 SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$1 <<\_ASEOF || lt_write_fail=1 AS_SHELL_SANITIZE _AS_PREPARE exec AS_MESSAGE_FD>&1 _ASEOF test $lt_write_fail = 0 && chmod +x $1[]dnl m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT # LT_OUTPUT # --------- # This macro allows early generation of the libtool script (before # AC_OUTPUT is called), incase it is used in configure for compilation # tests. 
AC_DEFUN([LT_OUTPUT], [: ${CONFIG_LT=./config.lt} AC_MSG_NOTICE([creating $CONFIG_LT]) _LT_GENERATED_FILE_INIT(["$CONFIG_LT"], [# Run this file to recreate a libtool stub with the current configuration.]) cat >>"$CONFIG_LT" <<\_LTEOF lt_cl_silent=false exec AS_MESSAGE_LOG_FD>>config.log { echo AS_BOX([Running $as_me.]) } >&AS_MESSAGE_LOG_FD lt_cl_help="\ \`$as_me' creates a local libtool stub from the current configuration, for use in further configure time tests before the real libtool is generated. Usage: $[0] [[OPTIONS]] -h, --help print this help, then exit -V, --version print version number, then exit -q, --quiet do not print progress messages -d, --debug don't remove temporary files Report bugs to ." lt_cl_version="\ m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) configured by $[0], generated by m4_PACKAGE_STRING. Copyright (C) 2011 Free Software Foundation, Inc. This config.lt script is free software; the Free Software Foundation gives unlimited permision to copy, distribute and modify it." while test $[#] != 0 do case $[1] in --version | --v* | -V ) echo "$lt_cl_version"; exit 0 ;; --help | --h* | -h ) echo "$lt_cl_help"; exit 0 ;; --debug | --d* | -d ) debug=: ;; --quiet | --q* | --silent | --s* | -q ) lt_cl_silent=: ;; -*) AC_MSG_ERROR([unrecognized option: $[1] Try \`$[0] --help' for more information.]) ;; *) AC_MSG_ERROR([unrecognized argument: $[1] Try \`$[0] --help' for more information.]) ;; esac shift done if $lt_cl_silent; then exec AS_MESSAGE_FD>/dev/null fi _LTEOF cat >>"$CONFIG_LT" <<_LTEOF _LT_OUTPUT_LIBTOOL_COMMANDS_INIT _LTEOF cat >>"$CONFIG_LT" <<\_LTEOF AC_MSG_NOTICE([creating $ofile]) _LT_OUTPUT_LIBTOOL_COMMANDS AS_EXIT(0) _LTEOF chmod +x "$CONFIG_LT" # configure is writing to config.log, but config.lt does its own redirection, # appending to config.log, which fails on DOS, as config.log is still kept # open by configure. 
Here we exec the FD to /dev/null, effectively closing # config.log, so it can be properly (re)opened and appended to by config.lt. lt_cl_success=: test "$silent" = yes && lt_config_lt_args="$lt_config_lt_args --quiet" exec AS_MESSAGE_LOG_FD>/dev/null $SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false exec AS_MESSAGE_LOG_FD>>config.log $lt_cl_success || AS_EXIT(1) ])# LT_OUTPUT # _LT_CONFIG(TAG) # --------------- # If TAG is the built-in tag, create an initial libtool script with a # default configuration from the untagged config vars. Otherwise add code # to config.status for appending the configuration named by TAG from the # matching tagged config vars. m4_defun([_LT_CONFIG], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl _LT_CONFIG_SAVE_COMMANDS([ m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl m4_if(_LT_TAG, [C], [ # See if we are running on zsh, and set the options which allow our # commands through without removal of \ escapes. if test -n "${ZSH_VERSION+set}" ; then setopt NO_GLOB_SUBST fi cfgfile="${ofile}T" trap "$RM \"$cfgfile\"; exit 1" 1 2 15 $RM "$cfgfile" cat <<_LT_EOF >> "$cfgfile" #! $SHELL # `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. # Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION # Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: # NOTE: Changes made to this file will be lost: look at ltmain.sh. # _LT_COPYING _LT_LIBTOOL_TAGS # ### BEGIN LIBTOOL CONFIG _LT_LIBTOOL_CONFIG_VARS _LT_LIBTOOL_TAG_VARS # ### END LIBTOOL CONFIG _LT_EOF case $host_os in aix3*) cat <<\_LT_EOF >> "$cfgfile" # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. 
if test "X${COLLECT_NAMES+set}" != Xset; then COLLECT_NAMES= export COLLECT_NAMES fi _LT_EOF ;; esac _LT_PROG_LTMAIN # We use sed instead of cat because bash on DJGPP gets confused if # if finds mixed CR/LF and LF-only lines. Since sed operates in # text mode, it properly converts lines to CR/LF. This bash problem # is reportedly fixed, but why not run on old versions too? sed '$q' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) _LT_PROG_REPLACE_SHELLFNS mv -f "$cfgfile" "$ofile" || (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") chmod +x "$ofile" ], [cat <<_LT_EOF >> "$ofile" dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded dnl in a comment (ie after a #). # ### BEGIN LIBTOOL TAG CONFIG: $1 _LT_LIBTOOL_TAG_VARS(_LT_TAG) # ### END LIBTOOL TAG CONFIG: $1 _LT_EOF ])dnl /m4_if ], [m4_if([$1], [], [ PACKAGE='$PACKAGE' VERSION='$VERSION' TIMESTAMP='$TIMESTAMP' RM='$RM' ofile='$ofile'], []) ])dnl /_LT_CONFIG_SAVE_COMMANDS ])# _LT_CONFIG # LT_SUPPORTED_TAG(TAG) # --------------------- # Trace this macro to discover what tags are supported by the libtool # --tag option, using: # autoconf --trace 'LT_SUPPORTED_TAG:$1' AC_DEFUN([LT_SUPPORTED_TAG], []) # C support is built-in for now m4_define([_LT_LANG_C_enabled], []) m4_define([_LT_TAGS], []) # LT_LANG(LANG) # ------------- # Enable libtool support for the given language if not already enabled. 
AC_DEFUN([LT_LANG], [AC_BEFORE([$0], [LT_OUTPUT])dnl m4_case([$1], [C], [_LT_LANG(C)], [C++], [_LT_LANG(CXX)], [Go], [_LT_LANG(GO)], [Java], [_LT_LANG(GCJ)], [Fortran 77], [_LT_LANG(F77)], [Fortran], [_LT_LANG(FC)], [Windows Resource], [_LT_LANG(RC)], [m4_ifdef([_LT_LANG_]$1[_CONFIG], [_LT_LANG($1)], [m4_fatal([$0: unsupported language: "$1"])])])dnl ])# LT_LANG # _LT_LANG(LANGNAME) # ------------------ m4_defun([_LT_LANG], [m4_ifdef([_LT_LANG_]$1[_enabled], [], [LT_SUPPORTED_TAG([$1])dnl m4_append([_LT_TAGS], [$1 ])dnl m4_define([_LT_LANG_]$1[_enabled], [])dnl _LT_LANG_$1_CONFIG($1)])dnl ])# _LT_LANG m4_ifndef([AC_PROG_GO], [ ############################################################ # NOTE: This macro has been submitted for inclusion into # # GNU Autoconf as AC_PROG_GO. When it is available in # # a released version of Autoconf we should remove this # # macro and use it instead. # ############################################################ m4_defun([AC_PROG_GO], [AC_LANG_PUSH(Go)dnl AC_ARG_VAR([GOC], [Go compiler command])dnl AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl _AC_ARG_VAR_LDFLAGS()dnl AC_CHECK_TOOL(GOC, gccgo) if test -z "$GOC"; then if test -n "$ac_tool_prefix"; then AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo]) fi fi if test -z "$GOC"; then AC_CHECK_PROG(GOC, gccgo, gccgo, false) fi ])#m4_defun ])#m4_ifndef # _LT_LANG_DEFAULT_CONFIG # ----------------------- m4_defun([_LT_LANG_DEFAULT_CONFIG], [AC_PROVIDE_IFELSE([AC_PROG_CXX], [LT_LANG(CXX)], [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) AC_PROVIDE_IFELSE([AC_PROG_F77], [LT_LANG(F77)], [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) AC_PROVIDE_IFELSE([AC_PROG_FC], [LT_LANG(FC)], [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal dnl pulling things in needlessly. 
AC_PROVIDE_IFELSE([AC_PROG_GCJ], [LT_LANG(GCJ)], [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], [LT_LANG(GCJ)], [AC_PROVIDE_IFELSE([LT_PROG_GCJ], [LT_LANG(GCJ)], [m4_ifdef([AC_PROG_GCJ], [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) m4_ifdef([A][M_PROG_GCJ], [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) m4_ifdef([LT_PROG_GCJ], [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) AC_PROVIDE_IFELSE([AC_PROG_GO], [LT_LANG(GO)], [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])]) AC_PROVIDE_IFELSE([LT_PROG_RC], [LT_LANG(RC)], [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) ])# _LT_LANG_DEFAULT_CONFIG # Obsolete macros: AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_LIBTOOL_CXX], []) dnl AC_DEFUN([AC_LIBTOOL_F77], []) dnl AC_DEFUN([AC_LIBTOOL_FC], []) dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) dnl AC_DEFUN([AC_LIBTOOL_RC], []) # _LT_TAG_COMPILER # ---------------- m4_defun([_LT_TAG_COMPILER], [AC_REQUIRE([AC_PROG_CC])dnl _LT_DECL([LTCC], [CC], [1], [A C compiler])dnl _LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl _LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl _LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC ])# _LT_TAG_COMPILER # _LT_COMPILER_BOILERPLATE # ------------------------ # Check for compiler boilerplate output or warnings with # the simple compiler test code. 
m4_defun([_LT_COMPILER_BOILERPLATE], [m4_require([_LT_DECL_SED])dnl ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ])# _LT_COMPILER_BOILERPLATE # _LT_LINKER_BOILERPLATE # ---------------------- # Check for linker boilerplate output or warnings with # the simple link test code. m4_defun([_LT_LINKER_BOILERPLATE], [m4_require([_LT_DECL_SED])dnl ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* ])# _LT_LINKER_BOILERPLATE # _LT_REQUIRED_DARWIN_CHECKS # ------------------------- m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ case $host_os in rhapsody* | darwin*) AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) AC_CHECK_TOOL([LIPO], [lipo], [:]) AC_CHECK_TOOL([OTOOL], [otool], [:]) AC_CHECK_TOOL([OTOOL64], [otool64], [:]) _LT_DECL([], [DSYMUTIL], [1], [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) _LT_DECL([], [NMEDIT], [1], [Tool to change global to local symbols on Mac OS X]) _LT_DECL([], [LIPO], [1], [Tool to manipulate fat objects and archives on Mac OS X]) _LT_DECL([], [OTOOL], [1], [ldd/readelf like tool for Mach-O binaries on Mac OS X]) _LT_DECL([], [OTOOL64], [1], [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], [lt_cv_apple_cc_single_mod=no if test -z "${LT_MULTI_MODULE}"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the # link flags. 
rm -rf libconftest.dylib* echo "int foo(void){return 1;}" > conftest.c echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err _lt_result=$? # If there is a non-empty error log, and "single_module" # appears in it, assume the flag caused a linker warning if test -s conftest.err && $GREP single_module conftest.err; then cat conftest.err >&AS_MESSAGE_LOG_FD # Otherwise, if the output was created with a 0 exit code from # the compiler, it worked. elif test -f libconftest.dylib && test $_lt_result -eq 0; then lt_cv_apple_cc_single_mod=yes else cat conftest.err >&AS_MESSAGE_LOG_FD fi rm -rf libconftest.dylib* rm -f conftest.* fi]) AC_CACHE_CHECK([for -exported_symbols_list linker flag], [lt_cv_ld_exported_symbols_list], [lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], [lt_cv_ld_exported_symbols_list=yes], [lt_cv_ld_exported_symbols_list=no]) LDFLAGS="$save_LDFLAGS" ]) AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load], [lt_cv_ld_force_load=no cat > conftest.c << _LT_EOF int forced_loaded() { return 2;} _LT_EOF echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD cat > conftest.c << _LT_EOF int main() { return 0;} _LT_EOF echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err _lt_result=$? 
if test -s conftest.err && $GREP force_load conftest.err; then cat conftest.err >&AS_MESSAGE_LOG_FD elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then lt_cv_ld_force_load=yes else cat conftest.err >&AS_MESSAGE_LOG_FD fi rm -f conftest.err libconftest.a conftest conftest.c rm -rf conftest.dSYM ]) case $host_os in rhapsody* | darwin1.[[012]]) _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on # if running on 10.5 or later, the deployment target defaults # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; 10.[[012]]*) _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; 10.*) _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; esac ;; esac if test "$lt_cv_apple_cc_single_mod" = "yes"; then _lt_dar_single_mod='$single_module' fi if test "$lt_cv_ld_exported_symbols_list" = "yes"; then _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' else _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' fi if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then _lt_dsymutil='~$DSYMUTIL $lib || :' else _lt_dsymutil= fi ;; esac ]) # _LT_DARWIN_LINKER_FEATURES([TAG]) # --------------------------------- # Checks for linker and compiler features on darwin m4_defun([_LT_DARWIN_LINKER_FEATURES], [ m4_require([_LT_REQUIRED_DARWIN_CHECKS]) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported if test "$lt_cv_ld_force_load" = "yes"; then _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv 
in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes], [FC], [_LT_TAGVAR(compiler_needs_object, $1)=yes]) else _LT_TAGVAR(whole_archive_flag_spec, $1)='' fi _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined" case $cc_basename in ifort*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test "$_lt_dar_can_shared" = "yes"; then output_verbose_link_cmd=func_echo_all _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" m4_if([$1], [CXX], [ if test "$lt_cv_apple_cc_single_mod" != "yes"; then _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag 
-o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" fi ],[]) else _LT_TAGVAR(ld_shlibs, $1)=no fi ]) # _LT_SYS_MODULE_PATH_AIX([TAGNAME]) # ---------------------------------- # Links a minimal program and checks the executable # for the system default hardcoded library path. In most cases, # this is /usr/lib:/lib, but when the MPI compilers are used # the location of the communication and MPI libs are included too. # If we don't find anything, use the default library path according # to the aix ld manual. # Store the results from the different compilers for each TAGNAME. # Allow to override them for all tags through lt_cv_aix_libpath. m4_defun([_LT_SYS_MODULE_PATH_AIX], [m4_require([_LT_DECL_SED])dnl if test "${lt_cv_aix_libpath+set}" = set; then aix_libpath=$lt_cv_aix_libpath else AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ lt_aix_libpath_sed='[ /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }]' _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi],[]) if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then _LT_TAGVAR([lt_cv_aix_libpath_], [$1])="/usr/lib:/lib" fi ]) aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) fi ])# _LT_SYS_MODULE_PATH_AIX # _LT_SHELL_INIT(ARG) # ------------------- m4_define([_LT_SHELL_INIT], [m4_divert_text([M4SH-INIT], [$1 ])])# _LT_SHELL_INIT # _LT_PROG_ECHO_BACKSLASH # ----------------------- # Find how we can fake an echo command that does not interpret backslash. 
# In particular, with Autoconf 2.60 or later we add some code to the start # of the generated configure script which will find a shell with a builtin # printf (which we can use as an echo command). m4_defun([_LT_PROG_ECHO_BACKSLASH], [ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO AC_MSG_CHECKING([how to print strings]) # Test print first, because it will be a builtin if present. if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='print -r --' elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='printf %s\n' else # Use this function as a fallback that always works. func_fallback_echo () { eval 'cat <<_LTECHO_EOF $[]1 _LTECHO_EOF' } ECHO='func_fallback_echo' fi # func_echo_all arg... # Invoke $ECHO with all args, space-separated. func_echo_all () { $ECHO "$*" } case "$ECHO" in printf*) AC_MSG_RESULT([printf]) ;; print*) AC_MSG_RESULT([print -r]) ;; *) AC_MSG_RESULT([cat]) ;; esac m4_ifdef([_AS_DETECT_SUGGESTED], [_AS_DETECT_SUGGESTED([ test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || ( ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO PATH=/empty FPATH=/empty; export PATH FPATH test "X`printf %s $ECHO`" = "X$ECHO" \ || test "X`print -r -- $ECHO`" = "X$ECHO" )])]) _LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) _LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) ])# _LT_PROG_ECHO_BACKSLASH # _LT_WITH_SYSROOT # ---------------- AC_DEFUN([_LT_WITH_SYSROOT], [AC_MSG_CHECKING([for sysroot]) AC_ARG_WITH([sysroot], [ --with-sysroot[=DIR] Search for dependent libraries within DIR (or the compiler's sysroot if not specified).], [], [with_sysroot=no]) dnl lt_sysroot will always be passed unquoted. 
We quote it here dnl in case the user passed a directory name. lt_sysroot= case ${with_sysroot} in #( yes) if test "$GCC" = yes; then lt_sysroot=`$CC --print-sysroot 2>/dev/null` fi ;; #( /*) lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` ;; #( no|'') ;; #( *) AC_MSG_RESULT([${with_sysroot}]) AC_MSG_ERROR([The sysroot must be an absolute path.]) ;; esac AC_MSG_RESULT([${lt_sysroot:-no}]) _LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl [dependent libraries, and in which our libraries should be installed.])]) # _LT_ENABLE_LOCK # --------------- m4_defun([_LT_ENABLE_LOCK], [AC_ARG_ENABLE([libtool-lock], [AS_HELP_STRING([--disable-libtool-lock], [avoid locking (might break parallel builds)])]) test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE="32" ;; *ELF-64*) HPUX_IA64_MODE="64" ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out which ABI we are using. echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then if test "$lt_cv_prog_gnu_ld" = yes; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" ;; *64-bit*) LD="${LD-ld} -melf64bmip" ;; esac else case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -32" ;; *N32*) LD="${LD-ld} -n32" ;; *64-bit*) LD="${LD-ld} -64" ;; esac fi fi rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out which ABI we are using. 
echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_i386" ;; ppc64-*linux*|powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) LD="${LD-ld} -m elf_s390" ;; sparc64-*linux*) LD="${LD-ld} -m elf32_sparc" ;; esac ;; *64-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_x86_64_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; ppc*-*linux*|powerpc*-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) LD="${LD-ld} -m elf64_s390" ;; sparc*-*linux*) LD="${LD-ld} -m elf64_sparc" ;; esac ;; esac fi rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -belf" AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, [AC_LANG_PUSH(C) AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) AC_LANG_POP]) if test x"$lt_cv_cc_needs_belf" != x"yes"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS="$SAVE_CFLAGS" fi ;; *-*solaris*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext if AC_TRY_EVAL(ac_compile); then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) case $host in i?86-*-solaris*) LD="${LD-ld} -m elf_x86_64" ;; sparc*-*-solaris*) LD="${LD-ld} -m elf64_sparc" ;; esac # GNU ld 2.21 introduced _sol2 emulations. Use them if available. 
if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then LD="${LD-ld}_sol2" fi ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" fi ;; esac ;; esac fi rm -rf conftest* ;; esac need_locks="$enable_libtool_lock" ])# _LT_ENABLE_LOCK # _LT_PROG_AR # ----------- m4_defun([_LT_PROG_AR], [AC_CHECK_TOOLS(AR, [ar], false) : ${AR=ar} : ${AR_FLAGS=cru} _LT_DECL([], [AR], [1], [The archiver]) _LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive]) AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], [lt_cv_ar_at_file=no AC_COMPILE_IFELSE([AC_LANG_PROGRAM], [echo conftest.$ac_objext > conftest.lst lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' AC_TRY_EVAL([lt_ar_try]) if test "$ac_status" -eq 0; then # Ensure the archiver fails upon bogus file names. rm -f conftest.$ac_objext libconftest.a AC_TRY_EVAL([lt_ar_try]) if test "$ac_status" -ne 0; then lt_cv_ar_at_file=@ fi fi rm -f conftest.* libconftest.a ]) ]) if test "x$lt_cv_ar_at_file" = xno; then archiver_list_spec= else archiver_list_spec=$lt_cv_ar_at_file fi _LT_DECL([], [archiver_list_spec], [1], [How to feed a file listing to the archiver]) ])# _LT_PROG_AR # _LT_CMD_OLD_ARCHIVE # ------------------- m4_defun([_LT_CMD_OLD_ARCHIVE], [_LT_PROG_AR AC_CHECK_TOOL(STRIP, strip, :) test -z "$STRIP" && STRIP=: _LT_DECL([], [STRIP], [1], [A symbol stripping program]) AC_CHECK_TOOL(RANLIB, ranlib, :) test -z "$RANLIB" && RANLIB=: _LT_DECL([], [RANLIB], [1], [Commands used to install an old-style archive]) # Determine commands to create old-style static archives. 
old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs'
old_postinstall_cmds='chmod 644 $oldlib'
old_postuninstall_cmds=

if test -n "$RANLIB"; then
  case $host_os in
  openbsd*)
    # OpenBSD ranlib wants -t to update the archive's symbol table.
    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib"
    ;;
  *)
    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib"
    ;;
  esac
  old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib"
fi

# Serialize archive extraction where concurrent extraction is unsafe.
case $host_os in
  darwin*) lock_old_archive_extraction=yes ;;
  *) lock_old_archive_extraction=no ;;
esac
_LT_DECL([], [old_postinstall_cmds], [2])
_LT_DECL([], [old_postuninstall_cmds], [2])
_LT_TAGDECL([], [old_archive_cmds], [2],
    [Commands used to build an old-style archive])
_LT_DECL([], [lock_old_archive_extraction], [0],
    [Whether to use a lock for old archive extraction])
])# _LT_CMD_OLD_ARCHIVE


# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
#	[OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE])
# ----------------------------------------------------------------
# Check whether the given compiler option works: compile a trivial
# program with FLAGS added, and treat any new diagnostics (beyond the
# recorded compiler boilerplate) as "option not supported".
AC_DEFUN([_LT_COMPILER_OPTION],
[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
m4_require([_LT_DECL_SED])dnl
AC_CACHE_CHECK([$1], [$2],
  [$2=no
   m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4])
   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
   lt_compiler_flag="$3"
   # Insert the option either (1) after the last *FLAGS variable, or
   # (2) before a word containing "conftest.", or (3) at the end.
   # Note that $ac_compile itself does not contain backslashes and begins
   # with a dollar sign (not a hyphen), so the echo should work correctly.
   # The option is referenced via a variable to avoid confusing sed.
   lt_compile=`echo "$ac_compile" | $SED \
   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
   -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
   -e 's:$: $lt_compiler_flag:'`
   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
   (eval "$lt_compile" 2>conftest.err)
   ac_status=$?
   cat conftest.err >&AS_MESSAGE_LOG_FD
   echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
   if (exit $ac_status) && test -s "$ac_outfile"; then
     # The compiler can only warn and ignore the option if not recognized
     # So say no if there are warnings other than the usual output.
     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
       $2=yes
     fi
   fi
   $RM conftest*
])

if test x"[$]$2" = xyes; then
    m4_if([$5], , :, [$5])
else
    m4_if([$6], , :, [$6])
fi
])# _LT_COMPILER_OPTION

# Old name:
AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION])
dnl aclocal-1.4 backwards compatibility:
dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], [])


# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
#	[ACTION-SUCCESS], [ACTION-FAILURE])
# ----------------------------------------------------
# Check whether the given linker option works: link a trivial program
# with FLAGS appended to LDFLAGS; diagnostics beyond the recorded
# linker boilerplate mean the option was merely warned about.
AC_DEFUN([_LT_LINKER_OPTION],
[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
m4_require([_LT_DECL_SED])dnl
AC_CACHE_CHECK([$1], [$2],
  [$2=no
   save_LDFLAGS="$LDFLAGS"
   LDFLAGS="$LDFLAGS $3"
   echo "$lt_simple_link_test_code" > conftest.$ac_ext
   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
     # The linker can only warn and ignore the option if not recognized
     # So say no if there are warnings
     if test -s conftest.err; then
       # Append any errors to the config.log.
cat conftest.err 1>&AS_MESSAGE_LOG_FD
       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
       if diff conftest.exp conftest.er2 >/dev/null; then
         $2=yes
       fi
     else
       $2=yes
     fi
   fi
   $RM -r conftest*
   LDFLAGS="$save_LDFLAGS"
])

if test x"[$]$2" = xyes; then
    m4_if([$4], , :, [$4])
else
    m4_if([$5], , :, [$5])
fi
])# _LT_LINKER_OPTION

# Old name:
AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION])
dnl aclocal-1.4 backwards compatibility:
dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], [])


# LT_CMD_MAX_LEN
#---------------
# Determine (and cache in lt_cv_sys_max_cmd_len) the maximum length of
# a command line the host can handle; -1 means "no limit".  Known-good
# values are hardcoded per platform; the fallback probes by doubling a
# test string until the shell/exec fails.
AC_DEFUN([LT_CMD_MAX_LEN],
[AC_REQUIRE([AC_CANONICAL_HOST])dnl
# find the maximum length of command line arguments
AC_MSG_CHECKING([the maximum length of command line arguments])
AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl
  i=0
  teststring="ABCD"

  case $build_os in
  msdosdjgpp*)
    # On DJGPP, this test can blow up pretty badly due to problems in libc
    # (any single argument exceeding 2000 bytes causes a buffer overrun
    # during glob expansion).  Even if it were fixed, the result of this
    # check would be larger than it should be.
    lt_cv_sys_max_cmd_len=12288;    # 12K is about right
    ;;

  gnu*)
    # Under GNU Hurd, this test is not required because there is
    # no limit to the length of command line arguments.
    # Libtool will interpret -1 as no limit whatsoever
    lt_cv_sys_max_cmd_len=-1;
    ;;

  cygwin* | mingw* | cegcc*)
    # On Win9x/ME, this test blows up -- it succeeds, but takes
    # about 5 minutes as the teststring grows exponentially.
    # Worse, since 9x/ME are not pre-emptively multitasking,
    # you end up with a "frozen" computer, even though with patience
    # the test eventually succeeds (with a max line length of 256k).
    # Instead, let's just punt: use the minimum linelength reported by
    # all of the supported platforms: 8192 (on NT/2K/XP).
    lt_cv_sys_max_cmd_len=8192;
    ;;

  mint*)
    # On MiNT this can take a long time and run out of memory.
    lt_cv_sys_max_cmd_len=8192;
    ;;

  amigaos*)
    # On AmigaOS with pdksh, this test takes hours, literally.
# So we just punt and use a minimum line length of 8192.
    lt_cv_sys_max_cmd_len=8192;
    ;;

  netbsd* | freebsd* | openbsd* | darwin* | dragonfly*)
    # This has been around since 386BSD, at least.  Likely further.
    if test -x /sbin/sysctl; then
      lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
    elif test -x /usr/sbin/sysctl; then
      lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
    else
      lt_cv_sys_max_cmd_len=65536	# usable default for all BSDs
    fi
    # And add a safety zone (keep 3/4 of the reported limit)
    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
    ;;

  interix*)
    # We know the value 262144 and hardcode it with a safety zone (like BSD)
    lt_cv_sys_max_cmd_len=196608
    ;;

  os2*)
    # The test takes a long time on OS/2.
    lt_cv_sys_max_cmd_len=8192
    ;;

  osf*)
    # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
    # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
    # nice to cause kernel panics so lets avoid the loop below.
    # First set a reasonable default.
    lt_cv_sys_max_cmd_len=16384
    #
    if test -x /sbin/sysconfig; then
      case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
        *1*) lt_cv_sys_max_cmd_len=-1 ;;
      esac
    fi
    ;;
  sco3.2v5*)
    lt_cv_sys_max_cmd_len=102400
    ;;
  sysv5* | sco5v6* | sysv4.2uw2*)
    # Read the kernel tunable directly; absent file means use the default.
    kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
    if test -n "$kargmax"; then
      lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'`
    else
      lt_cv_sys_max_cmd_len=32768
    fi
    ;;
  *)
    lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
    if test -n "$lt_cv_sys_max_cmd_len"; then
      # getconf worked: keep 3/4 of the reported limit as a safety zone.
      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
    else
      # Make teststring a little bigger before we do anything with it.
      # a 1K string should be a reasonable start.
for i in 1 2 3 4 5 6 7 8 ; do
        teststring=$teststring$teststring
      done
      SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
      # If test is not a shell built-in, we'll probably end up computing a
      # maximum length that is only half of the actual maximum length, but
      # we can't tell.
      while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \
	= "X$teststring$teststring"; } >/dev/null 2>&1 &&
	test $i != 17 # 1/2 MB should be enough
      do
        i=`expr $i + 1`
        teststring=$teststring$teststring
      done
      # Only check the string length outside the loop.
      lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
      teststring=
      # Add a significant safety factor because C++ compilers can tack on
      # massive amounts of additional arguments before passing them to the
      # linker.  It appears as though 1/2 is a usable value.
      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
    fi
    ;;
  esac
])
# NOTE(review): the operand must be quoted; an unquoted empty
# $lt_cv_sys_max_cmd_len would make `test -n` vacuously true and print an
# empty result instead of "none".  (Fixed likewise in later libtool; this
# file is aclocal-generated, so regenerating with a newer libtool is the
# durable fix.)
if test -n "$lt_cv_sys_max_cmd_len" ; then
  AC_MSG_RESULT($lt_cv_sys_max_cmd_len)
else
  AC_MSG_RESULT(none)
fi
max_cmd_len=$lt_cv_sys_max_cmd_len
_LT_DECL([], [max_cmd_len], [0],
    [What is the maximum length of a command?])
])# LT_CMD_MAX_LEN

# Old name:
AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN])
dnl aclocal-1.4 backwards compatibility:
dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], [])


# _LT_HEADER_DLFCN
# ----------------
# Check for <dlfcn.h>, defining HAVE_DLFCN_H for the dlopen-self probe.
m4_defun([_LT_HEADER_DLFCN],
[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl
])# _LT_HEADER_DLFCN


# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
#                      ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
# ----------------------------------------------------------------
# Build and run a program that dlopens itself and looks up the symbol
# "fnord", with and without a leading underscore, to classify the
# platform's dlopen/dlsym behavior.
m4_defun([_LT_TRY_DLOPEN_SELF],
[m4_require([_LT_HEADER_DLFCN])dnl
if test "$cross_compiling" = yes; then :
  [$4]
else
  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
  lt_status=$lt_dlunknown
  cat > conftest.$ac_ext <<_LT_EOF
[#line $LINENO "configure"
#include "confdefs.h"

/* NOTE(review): the <dlfcn.h>/<stdio.h> header names had been lost from
   this copy (angle-bracketed text stripped); restored, since the program
   below calls dlopen/dlsym and puts. */
#if HAVE_DLFCN_H
#include <dlfcn.h>
#endif

#include <stdio.h>

#ifdef RTLD_GLOBAL
#  define LT_DLGLOBAL		RTLD_GLOBAL
#else
#  ifdef DL_GLOBAL
#    define LT_DLGLOBAL		DL_GLOBAL
#  else
#    define LT_DLGLOBAL		0
#  endif
#endif

/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
   find out it does not work in some platform. */
#ifndef LT_DLLAZY_OR_NOW
#  ifdef RTLD_LAZY
#    define LT_DLLAZY_OR_NOW		RTLD_LAZY
#  else
#    ifdef DL_LAZY
#      define LT_DLLAZY_OR_NOW		DL_LAZY
#    else
#      ifdef RTLD_NOW
#        define LT_DLLAZY_OR_NOW	RTLD_NOW
#      else
#        ifdef DL_NOW
#          define LT_DLLAZY_OR_NOW	DL_NOW
#        else
#          define LT_DLLAZY_OR_NOW	0
#        endif
#      endif
#    endif
#  endif
#endif

/* When -fvisbility=hidden is used, assume the code has been annotated
   correspondingly for the symbols needed.  */
#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
int fnord () __attribute__((visibility("default")));
#endif

int fnord () { return 42; }
int main ()
{
  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
  int status = $lt_dlunknown;

  if (self)
    {
      if (dlsym (self,"fnord"))       status = $lt_dlno_uscore;
      else
        {
	  if (dlsym( self,"_fnord"))  status = $lt_dlneed_uscore;
          else puts (dlerror ());
	}
      /* dlclose (self); */
    }
  else
    puts (dlerror ());

  return status;
}]
_LT_EOF
  if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then
    (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null
    lt_status=$?
case x$lt_status in
      x$lt_dlno_uscore) $1 ;;
      x$lt_dlneed_uscore) $2 ;;
      x$lt_dlunknown|x*) $3 ;;
    esac
  else :
    # compilation failed
    $3
  fi
fi
rm -fr conftest*
])# _LT_TRY_DLOPEN_SELF


# LT_SYS_DLOPEN_SELF
# ------------------
# Determine which dlopen-style API the host provides and whether a
# program (dynamically and statically linked) can dlopen itself.
AC_DEFUN([LT_SYS_DLOPEN_SELF],
[m4_require([_LT_HEADER_DLFCN])dnl
if test "x$enable_dlopen" != xyes; then
  enable_dlopen=unknown
  enable_dlopen_self=unknown
  enable_dlopen_self_static=unknown
else
  lt_cv_dlopen=no
  lt_cv_dlopen_libs=

  case $host_os in
  beos*)
    lt_cv_dlopen="load_add_on"
    lt_cv_dlopen_libs=
    lt_cv_dlopen_self=yes
    ;;

  mingw* | pw32* | cegcc*)
    lt_cv_dlopen="LoadLibrary"
    lt_cv_dlopen_libs=
    ;;

  cygwin*)
    lt_cv_dlopen="dlopen"
    lt_cv_dlopen_libs=
    ;;

  darwin*)
    # if libdl is installed we need to link against it
    AC_CHECK_LIB([dl], [dlopen],
		[lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[
    lt_cv_dlopen="dyld"
    lt_cv_dlopen_libs=
    lt_cv_dlopen_self=yes
    ])
    ;;

  *)
    # Fall through a chain of candidates: shl_load (HP-UX), then dlopen
    # in libc, libdl, libsvld, and finally dld_link in libdld.
    AC_CHECK_FUNC([shl_load],
	  [lt_cv_dlopen="shl_load"],
      [AC_CHECK_LIB([dld], [shl_load],
	    [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"],
	[AC_CHECK_FUNC([dlopen],
	      [lt_cv_dlopen="dlopen"],
	  [AC_CHECK_LIB([dl], [dlopen],
		[lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],
	    [AC_CHECK_LIB([svld], [dlopen],
		  [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"],
	      [AC_CHECK_LIB([dld], [dld_link],
		    [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"])
	      ])
	    ])
	  ])
	])
      ])
    ;;
  esac

  if test "x$lt_cv_dlopen" != xno; then
    enable_dlopen=yes
  else
    enable_dlopen=no
  fi

  case $lt_cv_dlopen in
  dlopen)
    save_CPPFLAGS="$CPPFLAGS"
    test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"

    save_LDFLAGS="$LDFLAGS"
    wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"

    save_LIBS="$LIBS"
    LIBS="$lt_cv_dlopen_libs $LIBS"

    AC_CACHE_CHECK([whether a program can dlopen itself],
	  lt_cv_dlopen_self, [dnl
	  _LT_TRY_DLOPEN_SELF(
	    lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes,
	    lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross)
    ])

    if test "x$lt_cv_dlopen_self" = xyes; then
      # Repeat the probe with static-link flags added.
      wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
      AC_CACHE_CHECK([whether a statically linked program can dlopen itself],
	  lt_cv_dlopen_self_static, [dnl
	  _LT_TRY_DLOPEN_SELF(
	    lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes,
	    lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross)
      ])
    fi

    CPPFLAGS="$save_CPPFLAGS"
    LDFLAGS="$save_LDFLAGS"
    LIBS="$save_LIBS"
    ;;
  esac

  case $lt_cv_dlopen_self in
  yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
  *) enable_dlopen_self=unknown ;;
  esac

  case $lt_cv_dlopen_self_static in
  yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
  *) enable_dlopen_self_static=unknown ;;
  esac
fi
_LT_DECL([dlopen_support], [enable_dlopen], [0],
	 [Whether dlopen is supported])
_LT_DECL([dlopen_self], [enable_dlopen_self], [0],
	 [Whether dlopen of programs is supported])
_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0],
	 [Whether dlopen of statically linked programs is supported])
])# LT_SYS_DLOPEN_SELF

# Old name:
AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF])
dnl aclocal-1.4 backwards compatibility:
dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], [])


# _LT_COMPILER_C_O([TAGNAME])
# ---------------------------
# Check to see if options -c and -o are simultaneously supported by compiler.
# This macro does not hard code the compiler like AC_PROG_CC_C_O.
m4_defun([_LT_COMPILER_C_O],
[m4_require([_LT_DECL_SED])dnl
m4_require([_LT_FILEUTILS_DEFAULTS])dnl
m4_require([_LT_TAG_COMPILER])dnl
AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext],
  [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)],
  [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no
   $RM -r conftest 2>/dev/null
   mkdir conftest
   cd conftest
   mkdir out
   echo "$lt_simple_compile_test_code" > conftest.$ac_ext

   lt_compiler_flag="-o out/conftest2.$ac_objext"
   # Insert the option either (1) after the last *FLAGS variable, or
   # (2) before a word containing "conftest.", or (3) at the end.
# Note that $ac_compile itself does not contain backslashes and begins
   # with a dollar sign (not a hyphen), so the echo should work correctly.
   lt_compile=`echo "$ac_compile" | $SED \
   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
   -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
   -e 's:$: $lt_compiler_flag:'`
   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
   (eval "$lt_compile" 2>out/conftest.err)
   ac_status=$?
   cat out/conftest.err >&AS_MESSAGE_LOG_FD
   echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
   if (exit $ac_status) && test -s out/conftest2.$ac_objext
   then
     # The compiler can only warn and ignore the option if not recognized
     # So say no if there are warnings
     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
       _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
     fi
   fi
   chmod u+w . 2>&AS_MESSAGE_LOG_FD
   $RM conftest*
   # SGI C++ compiler will create directory out/ii_files/ for
   # template instantiation
   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
   $RM out/* && rmdir out
   cd ..
$RM -r conftest
   $RM conftest*
])
_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1],
	[Does compiler simultaneously support -c and -o options?])
])# _LT_COMPILER_C_O


# _LT_COMPILER_FILE_LOCKS([TAGNAME])
# ----------------------------------
# Check to see if we can do hard links to lock some files if needed
m4_defun([_LT_COMPILER_FILE_LOCKS],
[m4_require([_LT_ENABLE_LOCK])dnl
m4_require([_LT_FILEUTILS_DEFAULTS])dnl
_LT_COMPILER_C_O([$1])

hard_links="nottested"
if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then
  # do not overwrite the value of need_locks provided by the user
  AC_MSG_CHECKING([if we can lock with hard links])
  hard_links=yes
  $RM conftest*
  # Linking must fail before the target exists and succeed once it does;
  # any deviation means hard links are unusable for locking here.
  ln conftest.a conftest.b 2>/dev/null && hard_links=no
  touch conftest.a
  ln conftest.a conftest.b 2>&5 || hard_links=no
  ln conftest.a conftest.b 2>/dev/null && hard_links=no
  AC_MSG_RESULT([$hard_links])
  if test "$hard_links" = no; then
    AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe])
    need_locks=warn
  fi
else
  need_locks=no
fi
_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?])
])# _LT_COMPILER_FILE_LOCKS


# _LT_CHECK_OBJDIR
# ----------------
# Pick the name of the directory used for temporary libtool files
# (.libs, or _libs where a leading dot is not allowed).
m4_defun([_LT_CHECK_OBJDIR],
[AC_CACHE_CHECK([for objdir], [lt_cv_objdir],
[rm -f .libs 2>/dev/null
mkdir .libs 2>/dev/null
if test -d .libs; then
  lt_cv_objdir=.libs
else
  # MS-DOS does not allow filenames that begin with a dot.
  lt_cv_objdir=_libs
fi
rmdir .libs 2>/dev/null])
objdir=$lt_cv_objdir
_LT_DECL([], [objdir], [0],
         [The name of the directory that contains temporary libtool files])dnl
m4_pattern_allow([LT_OBJDIR])dnl
AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/",
  [Define to the sub-directory in which libtool stores uninstalled libraries.])
])# _LT_CHECK_OBJDIR


# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME])
# --------------------------------------
# Check hardcoding attributes.
m4_defun([_LT_LINKER_HARDCODE_LIBPATH],
[AC_MSG_CHECKING([how to hardcode library paths into programs])
# Result is one of: relink, immediate, unsupported.
_LT_TAGVAR(hardcode_action, $1)=
if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" ||
   test -n "$_LT_TAGVAR(runpath_var, $1)" ||
   test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then

  # We can hardcode non-existent directories.
  if test "$_LT_TAGVAR(hardcode_direct, $1)" != no &&
     # If the only mechanism to avoid hardcoding is shlibpath_var, we
     # have to relink, otherwise we might link with an installed library
     # when we should be linking with a yet-to-be-installed one
     ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no &&
     test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then
    # Linking always hardcodes the temporary library directory.
    _LT_TAGVAR(hardcode_action, $1)=relink
  else
    # We can link without hardcoding, and we can hardcode nonexisting dirs.
    _LT_TAGVAR(hardcode_action, $1)=immediate
  fi
else
  # We cannot hardcode anything, or else we can only hardcode existing
  # directories.
_LT_TAGVAR(hardcode_action, $1)=unsupported
fi
AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)])

if test "$_LT_TAGVAR(hardcode_action, $1)" = relink ||
   test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then
  # Fast installation is not supported
  enable_fast_install=no
elif test "$shlibpath_overrides_runpath" = yes ||
     test "$enable_shared" = no; then
  # Fast installation is not necessary
  enable_fast_install=needless
fi
_LT_TAGDECL([], [hardcode_action], [0],
    [How to hardcode a shared library path into an executable])
])# _LT_LINKER_HARDCODE_LIBPATH


# _LT_CMD_STRIPLIB
# ----------------
# Determine the commands (if any) that can safely strip shared and
# static libraries; only GNU strip and Darwin's strip are trusted.
m4_defun([_LT_CMD_STRIPLIB],
[m4_require([_LT_DECL_EGREP])
striplib=
old_striplib=
AC_MSG_CHECKING([whether stripping libraries is possible])
if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
  test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
  test -z "$striplib" && striplib="$STRIP --strip-unneeded"
  AC_MSG_RESULT([yes])
else
# FIXME - insert some real tests, host_os isn't really good enough
  case $host_os in
  darwin*)
    if test -n "$STRIP" ; then
      striplib="$STRIP -x"
      old_striplib="$STRIP -S"
      AC_MSG_RESULT([yes])
    else
      AC_MSG_RESULT([no])
    fi
    ;;
  *)
    AC_MSG_RESULT([no])
    ;;
  esac
fi
_LT_DECL([], [old_striplib], [1], [Commands to strip libraries])
_LT_DECL([], [striplib], [1])
])# _LT_CMD_STRIPLIB


# _LT_SYS_DYNAMIC_LINKER([TAG])
# -----------------------------
# PORTME Fill in your ld.so characteristics
m4_defun([_LT_SYS_DYNAMIC_LINKER],
[AC_REQUIRE([AC_CANONICAL_HOST])dnl
m4_require([_LT_DECL_EGREP])dnl
m4_require([_LT_FILEUTILS_DEFAULTS])dnl
m4_require([_LT_DECL_OBJDUMP])dnl
m4_require([_LT_DECL_SED])dnl
m4_require([_LT_CHECK_SHELL_FEATURES])dnl
AC_MSG_CHECKING([dynamic linker characteristics])
m4_if([$1],
	[], [
if test "$GCC" = yes; then
  case $host_os in
    darwin*) lt_awk_arg="/^libraries:/,/LR/" ;;
    *) lt_awk_arg="/^libraries:/" ;;
  esac
  case $host_os in
    mingw* | cegcc*) lt_sed_strip_eq="s,=\([[A-Za-z]]:\),\1,g" ;;
    *) lt_sed_strip_eq="s,=/,/,g" ;;
  esac
lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` case $lt_search_path_spec in *\;*) # if the path contains ";" then we assume it to be the separator # otherwise default to the standard path separator (i.e. ":") - it is # assumed that no part of a normal pathname contains ";" but that should # okay in the real world where ";" in dirpaths is itself problematic. lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` ;; *) lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` ;; esac # Ok, now we have the path, separated by spaces, we can step through it # and add multilib dir if necessary. lt_tmp_lt_search_path_spec= lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` for lt_sys_path in $lt_search_path_spec; do if test -d "$lt_sys_path/$lt_multi_os_dir"; then lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" else test -d "$lt_sys_path" && \ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" fi done lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' BEGIN {RS=" "; FS="/|\n";} { lt_foo=""; lt_count=0; for (lt_i = NF; lt_i > 0; lt_i--) { if ($lt_i != "" && $lt_i != ".") { if ($lt_i == "..") { lt_count++; } else { if (lt_count == 0) { lt_foo="/" $lt_i lt_foo; } else { lt_count--; } } } } if (lt_foo != "") { lt_freq[[lt_foo]]++; } if (lt_freq[[lt_foo]] == 1) { print lt_foo; } }'` # AWK program above erroneously prepends '/' to C:/dos/paths # for these hosts. 
case $host_os in mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ $SED 's,/\([[A-Za-z]]:\),\1,g'` ;; esac sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` else sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi]) library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=".so" postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='${libname}${release}${shared_ext}$major' ;; aix[[4-9]]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test "$host_cpu" = ia64; then # AIX 5 supports IA64 library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line `#! .'. This would cause the generated library to # depend on `.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. 
case $host_os in aix4 | aix4.[[01]] | aix4.[[01]].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # AIX (on Power*) has no versioning support, so currently we can not hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. if test "$aix_use_runtimelinking" = yes; then # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' else # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='${libname}${release}.a $libname.a' soname_spec='${libname}${release}${shared_ext}$major' fi shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. 
finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='${libname}${shared_ext}' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[[45]]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=".dll" need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' m4_if([$1], [],[ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"]) ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' library_names_spec='${libname}.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... 
sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec="$LIB" if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then # It is most probably a Windows format PATH. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \${file}`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . 
and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' soname_spec='${libname}${release}${major}$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' m4_if([$1], [],[ sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. 
if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[[23]].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[[01]]* | freebsdelf3.[[01]]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; gnu*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=yes sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other 
versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' if test "X$HPUX_IA64_MODE" = X32; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" fi sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[[3-9]]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test "$lt_cv_prog_gnu_ld" = yes; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; # This must be glibc/ELF. 
linux* | k*bsd*-gnu | kopensolaris*-gnu) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath], [lt_cv_shlibpath_overrides_runpath=no save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], [lt_cv_shlibpath_overrides_runpath=yes])]) LDFLAGS=$save_LDFLAGS libdir=$save_libdir ]) shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Add ABI-specific directories to the system library path. sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" # Append ld.so.conf contents to the search path if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. 
Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. dynamic_linker='GNU/Linux ld.so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd*) version_type=sunos sys_lib_dlsearch_path_spec="/usr/lib" need_lib_prefix=no # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
case $host_os in openbsd3.3 | openbsd3.3.*) need_version=yes ;; *) need_version=no ;; esac library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then case $host_os in openbsd2.[[89]] | openbsd2.[[89]].*) shlibpath_overrides_runpath=no ;; *) shlibpath_overrides_runpath=yes ;; esac else shlibpath_overrides_runpath=yes fi ;; os2*) libname_spec='$name' shrext_cmds=".dll" need_lib_prefix=no library_names_spec='$libname${shared_ext} $libname.a' dynamic_linker='OS/2 ld.exe' shlibpath_var=LIBPATH ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='${libname}${release}${shared_ext}$major' library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test "$with_gnu_ld" = yes; then need_lib_prefix=no fi need_version=yes ;; 
sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec ;then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' soname_spec='$libname${shared_ext}.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=freebsd-elf need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test "$with_gnu_ld" = yes; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac AC_MSG_RESULT([$dynamic_linker]) test "$dynamic_linker" = no && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" fi if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" fi _LT_DECL([], [variables_saved_for_relink], [1], [Variables whose values should be saved in libtool wrapper scripts and restored at link time]) _LT_DECL([], [need_lib_prefix], [0], [Do we need the "lib" prefix for modules?]) _LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) _LT_DECL([], [version_type], [0], [Library versioning type]) _LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) _LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) _LT_DECL([], [shlibpath_overrides_runpath], [0], [Is shlibpath searched before the hard-coded library search path?]) _LT_DECL([], [libname_spec], [1], [Format of library name prefix]) _LT_DECL([], [library_names_spec], [1], [[List of archive names. First name is the real one, the rest are links. 
The last name is the one that the linker finds with -lNAME]]) _LT_DECL([], [soname_spec], [1], [[The coded name of the library, if different from the real name]]) _LT_DECL([], [install_override_mode], [1], [Permission mode override for installation of shared libraries]) _LT_DECL([], [postinstall_cmds], [2], [Command to use after installation of a shared archive]) _LT_DECL([], [postuninstall_cmds], [2], [Command to use after uninstallation of a shared archive]) _LT_DECL([], [finish_cmds], [2], [Commands used to finish a libtool library installation in a directory]) _LT_DECL([], [finish_eval], [1], [[As "finish_cmds", except a single script fragment to be evaled but not shown]]) _LT_DECL([], [hardcode_into_libs], [0], [Whether we should hardcode library paths into libraries]) _LT_DECL([], [sys_lib_search_path_spec], [2], [Compile-time system search path for libraries]) _LT_DECL([], [sys_lib_dlsearch_path_spec], [2], [Run-time system search path for libraries]) ])# _LT_SYS_DYNAMIC_LINKER # _LT_PATH_TOOL_PREFIX(TOOL) # -------------------------- # find a file program which can recognize shared library AC_DEFUN([_LT_PATH_TOOL_PREFIX], [m4_require([_LT_DECL_EGREP])dnl AC_MSG_CHECKING([for $1]) AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, [case $MAGIC_CMD in [[\\/*] | ?:[\\/]*]) lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD="$MAGIC_CMD" lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR dnl $ac_dummy forces splitting on constant user-supplied paths. dnl POSIX.2 word splitting is done only on the output of word expansions, dnl not every word. This closes a longstanding sh security hole. ac_dummy="m4_if([$2], , $PATH, [$2])" for ac_dir in $ac_dummy; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. 
if test -f $ac_dir/$1; then lt_cv_path_MAGIC_CMD="$ac_dir/$1" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS="$lt_save_ifs" MAGIC_CMD="$lt_save_MAGIC_CMD" ;; esac]) MAGIC_CMD="$lt_cv_path_MAGIC_CMD" if test -n "$MAGIC_CMD"; then AC_MSG_RESULT($MAGIC_CMD) else AC_MSG_RESULT(no) fi _LT_DECL([], [MAGIC_CMD], [0], [Used to examine libraries when file_magic_cmd begins with "file"])dnl ])# _LT_PATH_TOOL_PREFIX # Old name: AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) # _LT_PATH_MAGIC # -------------- # find a file program which can recognize a shared library m4_defun([_LT_PATH_MAGIC], [_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) else MAGIC_CMD=: fi fi ])# _LT_PATH_MAGIC # LT_PATH_LD # ---------- # find the pathname to the GNU or non-GNU linker AC_DEFUN([LT_PATH_LD], [AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl m4_require([_LT_DECL_SED])dnl 
m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_PROG_ECHO_BACKSLASH])dnl AC_ARG_WITH([gnu-ld], [AS_HELP_STRING([--with-gnu-ld], [assume the C compiler uses GNU ld @<:@default=no@:>@])], [test "$withval" = no || with_gnu_ld=yes], [with_gnu_ld=no])dnl ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. AC_MSG_CHECKING([for ld used by $CC]) case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [[\\/]]* | ?:[[\\/]]*) re_direlt='/[[^/]][[^/]]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then AC_MSG_CHECKING([for GNU ld]) else AC_MSG_CHECKING([for non-GNU ld]) fi AC_CACHE_VAL(lt_cv_path_LD, [if test -z "$LD"; then lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &1 /dev/null 2>&1; then lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' else # Keep this pattern in sync with the one in func_win32_libid. 
lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' lt_cv_file_magic_cmd='$OBJDUMP -f' fi ;; cegcc*) # use the weaker test based on 'objdump'. See mingw*. lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) lt_cv_deplibs_check_method=pass_all ;; freebsd* | dragonfly*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. # Let's accept both of them until this is cleared up. lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ;; esac else lt_cv_deplibs_check_method=pass_all fi ;; gnu*) lt_cv_deplibs_check_method=pass_all ;; haiku*) lt_cv_deplibs_check_method=pass_all ;; hpux10.20* | hpux11*) lt_cv_file_magic_cmd=/usr/bin/file case $host_cpu in ia64*) lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ;; hppa*64*) [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'] lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library' lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac ;; interix[[3-9]]*) # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' ;; irix5* | irix6* | nonstopux*) case $LD in *-32|*"-32 ") libmagic=32-bit;; *-n32|*"-n32 ") libmagic=N32;; *-64|*"-64 ") libmagic=64-bit;; *) libmagic=never-match;; esac lt_cv_deplibs_check_method=pass_all ;; # This must be glibc/ELF. 
linux* | k*bsd*-gnu | kopensolaris*-gnu) lt_cv_deplibs_check_method=pass_all ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' fi ;; newos6*) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; *nto* | *qnx*) lt_cv_deplibs_check_method=pass_all ;; openbsd*) if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) lt_cv_deplibs_check_method=pass_all ;; rdos*) lt_cv_deplibs_check_method=pass_all ;; solaris*) lt_cv_deplibs_check_method=pass_all ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) lt_cv_deplibs_check_method=pass_all ;; sysv4 | sysv4.3*) case $host_vendor in motorola) lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ;; ncr) lt_cv_deplibs_check_method=pass_all ;; sequent) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' ;; sni) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" lt_cv_file_magic_test_file=/lib/libc.so ;; siemens) lt_cv_deplibs_check_method=pass_all ;; pc) lt_cv_deplibs_check_method=pass_all ;; esac ;; tpf*) lt_cv_deplibs_check_method=pass_all ;; esac ]) file_magic_glob= want_nocaseglob=no if test "$build" = "$host"; then case $host_os in mingw* | pw32*) if 
( shopt | grep nocaseglob ) >/dev/null 2>&1; then want_nocaseglob=yes else file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` fi ;; esac fi file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown _LT_DECL([], [deplibs_check_method], [1], [Method to check whether dependent libraries are shared objects]) _LT_DECL([], [file_magic_cmd], [1], [Command to use when deplibs_check_method = "file_magic"]) _LT_DECL([], [file_magic_glob], [1], [How to find potential files when deplibs_check_method = "file_magic"]) _LT_DECL([], [want_nocaseglob], [1], [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) ])# _LT_CHECK_MAGIC_METHOD # LT_PATH_NM # ---------- # find the pathname to a BSD- or MS-compatible name lister AC_DEFUN([LT_PATH_NM], [AC_REQUIRE([AC_PROG_CC])dnl AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, [if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM="$NM" else lt_nm_to_check="${ac_tool_prefix}nm" if test -n "$ac_tool_prefix" && test "$build" = "$host"; then lt_nm_to_check="$lt_nm_to_check nm" fi for lt_tmp_nm in $lt_nm_to_check; do lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do IFS="$lt_save_ifs" test -z "$ac_dir" && ac_dir=. tmp_nm="$ac_dir/$lt_tmp_nm" if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then # Check to see if the nm accepts a BSD-compat flag. 
# Adding the `sed 1q' prevents false positives on HP-UX, which says: # nm: unknown option "B" ignored # Tru64's nm complains that /dev/null is an invalid object file case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in */dev/null* | *'Invalid file or object type'*) lt_cv_path_NM="$tmp_nm -B" break ;; *) case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in */dev/null*) lt_cv_path_NM="$tmp_nm -p" break ;; *) lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but continue # so that we can try to find one that supports BSD flags ;; esac ;; esac fi done IFS="$lt_save_ifs" done : ${lt_cv_path_NM=no} fi]) if test "$lt_cv_path_NM" != "no"; then NM="$lt_cv_path_NM" else # Didn't find any BSD compatible name lister, look for dumpbin. if test -n "$DUMPBIN"; then : # Let the user override the test. else AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :) case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in *COFF*) DUMPBIN="$DUMPBIN -symbols" ;; *) DUMPBIN=: ;; esac fi AC_SUBST([DUMPBIN]) if test "$DUMPBIN" != ":"; then NM="$DUMPBIN" fi fi test -z "$NM" && NM=nm AC_SUBST([NM]) _LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], [lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&AS_MESSAGE_LOG_FD (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&AS_MESSAGE_LOG_FD (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD) cat conftest.out >&AS_MESSAGE_LOG_FD if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest*]) ])# LT_PATH_NM # Old names: AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) dnl aclocal-1.4 backwards compatibility: dnl 
AC_DEFUN([AM_PROG_NM], []) dnl AC_DEFUN([AC_PROG_NM], []) # _LT_CHECK_SHAREDLIB_FROM_LINKLIB # -------------------------------- # how to determine the name of the shared library # associated with a specific link library. # -- PORTME fill in with the dynamic library characteristics m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB], [m4_require([_LT_DECL_EGREP]) m4_require([_LT_DECL_OBJDUMP]) m4_require([_LT_DECL_DLLTOOL]) AC_CACHE_CHECK([how to associate runtime and link libraries], lt_cv_sharedlib_from_linklib_cmd, [lt_cv_sharedlib_from_linklib_cmd='unknown' case $host_os in cygwin* | mingw* | pw32* | cegcc*) # two different shell functions defined in ltmain.sh # decide which to use based on capabilities of $DLLTOOL case `$DLLTOOL --help 2>&1` in *--identify-strict*) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ;; *) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ;; esac ;; *) # fallback: assume linklib IS sharedlib lt_cv_sharedlib_from_linklib_cmd="$ECHO" ;; esac ]) sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO _LT_DECL([], [sharedlib_from_linklib_cmd], [1], [Command to associate shared and link libraries]) ])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB # _LT_PATH_MANIFEST_TOOL # ---------------------- # locate the manifest tool m4_defun([_LT_PATH_MANIFEST_TOOL], [AC_CHECK_TOOL(MANIFEST_TOOL, mt, :) test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool], [lt_cv_path_mainfest_tool=no echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out cat conftest.err >&AS_MESSAGE_LOG_FD if $GREP 'Manifest Tool' conftest.out > /dev/null; then lt_cv_path_mainfest_tool=yes fi rm -f conftest*]) if test "x$lt_cv_path_mainfest_tool" != xyes; then MANIFEST_TOOL=: fi _LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl ])# _LT_PATH_MANIFEST_TOOL # LT_LIB_M # -------- # check for math library AC_DEFUN([LT_LIB_M], [AC_REQUIRE([AC_CANONICAL_HOST])dnl LIBM= case $host in *-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*) # These system don't have libm, or don't need it ;; *-ncr-sysv4.3*) AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw") AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") ;; *) AC_CHECK_LIB(m, cos, LIBM="-lm") ;; esac AC_SUBST([LIBM]) ])# LT_LIB_M # Old name: AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([AC_CHECK_LIBM], []) # _LT_COMPILER_NO_RTTI([TAGNAME]) # ------------------------------- m4_defun([_LT_COMPILER_NO_RTTI], [m4_require([_LT_TAG_COMPILER])dnl _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= if test "$GCC" = yes; then case $cc_basename in nvcc*) _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;; *) _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;; esac _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], lt_cv_prog_compiler_rtti_exceptions, [-fno-rtti -fno-exceptions], [], [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) fi _LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], [Compiler flag to turn off builtin functions]) ])# _LT_COMPILER_NO_RTTI # _LT_CMD_GLOBAL_SYMBOLS # ---------------------- m4_defun([_LT_CMD_GLOBAL_SYMBOLS], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_PROG_CC])dnl AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([LT_PATH_NM])dnl AC_REQUIRE([LT_PATH_LD])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_DECL_EGREP])dnl 
m4_require([_LT_TAG_COMPILER])dnl # Check for command to grab the raw symbol name followed by C symbol from nm. AC_MSG_CHECKING([command to parse $NM output from $compiler object]) AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], [ # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. symcode='[[BCDEGRST]]' # Regexp to match symbols that can be accessed directly from C. sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' # Define system-specific variables. case $host_os in aix*) symcode='[[BCDT]]' ;; cygwin* | mingw* | pw32* | cegcc*) symcode='[[ABCDGISTW]]' ;; hpux*) if test "$host_cpu" = ia64; then symcode='[[ABCDEGRST]]' fi ;; irix* | nonstopux*) symcode='[[BCDEGRST]]' ;; osf*) symcode='[[BCDEGQRST]]' ;; solaris*) symcode='[[BDRT]]' ;; sco3.2v5*) symcode='[[DT]]' ;; sysv4.2uw2*) symcode='[[DT]]' ;; sysv5* | sco5v6* | unixware* | OpenUNIX*) symcode='[[ABDT]]' ;; sysv4) symcode='[[DFNSTU]]' ;; esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) symcode='[[ABCDGIRSTW]]' ;; esac # Transform an extracted symbol line into a proper C declaration. # Some systems (esp. on ia64) link data and code symbols differently, # so use this general approach. 
lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" # Transform an extracted symbol line into symbol name and symbol address lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac # Try without a prefix underscore, then with it. for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Fake it for dumpbin and say T for any non-static function # and D for any global variable. # Also find C++ and __fastcall symbols from MSVC++, # which start with @ or ?. lt_cv_sys_global_symbol_pipe="$AWK ['"\ " {last_section=section; section=\$ 3};"\ " /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ " \$ 0!~/External *\|/{next};"\ " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ " {if(hide[section]) next};"\ " {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? 
\"T \" : \"D \"};"\ " {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ " s[1]~/^[@?]/{print s[1], s[1]; next};"\ " s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ " ' prfx=^$ac_symprfx]" else lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" fi lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" # Check to see that the pipe works correctly. pipe_works=no rm -f conftest* cat > conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; void nm_test_func(void); void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if AC_TRY_EVAL(ac_compile); then # Now try to grab the symbols. nlist=conftest.nm if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi # Make sure that we snagged all the symbols we need. if $GREP ' nm_test_var$' "$nlist" >/dev/null; then if $GREP ' nm_test_func$' "$nlist" >/dev/null; then cat <<_LT_EOF > conftest.$ac_ext /* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ #if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) /* DATA imports from DLLs on WIN32 con't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. */ # define LT@&t@_DLSYM_CONST #elif defined(__osf__) /* This system does not cope well with relocations in const data. */ # define LT@&t@_DLSYM_CONST #else # define LT@&t@_DLSYM_CONST const #endif #ifdef __cplusplus extern "C" { #endif _LT_EOF # Now generate the symbol file. eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' cat <<_LT_EOF >> conftest.$ac_ext /* The mapping between symbol names and symbols. 
*/ LT@&t@_DLSYM_CONST struct { const char *name; void *address; } lt__PROGRAM__LTX_preloaded_symbols[[]] = { { "@PROGRAM@", (void *) 0 }, _LT_EOF $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext cat <<\_LT_EOF >> conftest.$ac_ext {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt__PROGRAM__LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif _LT_EOF # Now try linking the two files. mv conftest.$ac_objext conftstm.$ac_objext lt_globsym_save_LIBS=$LIBS lt_globsym_save_CFLAGS=$CFLAGS LIBS="conftstm.$ac_objext" CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then pipe_works=yes fi LIBS=$lt_globsym_save_LIBS CFLAGS=$lt_globsym_save_CFLAGS else echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD fi else echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD fi else echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD fi else echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD cat conftest.$ac_ext >&5 fi rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test "$pipe_works" = yes; then break else lt_cv_sys_global_symbol_pipe= fi done ]) if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then AC_MSG_RESULT(failed) else AC_MSG_RESULT(ok) fi # Response file support. 
if test "$lt_cv_nm_interface" = "MS dumpbin"; then nm_file_list_spec='@' elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then nm_file_list_spec='@' fi _LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], [Take the output of nm and produce a listing of raw symbols and C names]) _LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], [Transform the output of nm in a proper C declaration]) _LT_DECL([global_symbol_to_c_name_address], [lt_cv_sys_global_symbol_to_c_name_address], [1], [Transform the output of nm in a C name address pair]) _LT_DECL([global_symbol_to_c_name_address_lib_prefix], [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], [Transform the output of nm in a C name address pair when lib prefix is needed]) _LT_DECL([], [nm_file_list_spec], [1], [Specify filename containing input files for $NM]) ]) # _LT_CMD_GLOBAL_SYMBOLS # _LT_COMPILER_PIC([TAGNAME]) # --------------------------- m4_defun([_LT_COMPILER_PIC], [m4_require([_LT_TAG_COMPILER])dnl _LT_TAGVAR(lt_prog_compiler_wl, $1)= _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)= m4_if([$1], [CXX], [ # C++ specific cases for pic, static, wl, etc. if test "$GXX" = yes; then _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' case $host_os in aix*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. 
;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ;; *djgpp*) # DJGPP does not support shared libraries at all _LT_TAGVAR(lt_prog_compiler_pic, $1)= ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. _LT_TAGVAR(lt_prog_compiler_static, $1)= ;; interix[[3-9]]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic fi ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac else case $host_os in aix[[4-9]]*) # All AIX code is PIC. 
if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' else _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi ;; chorus*) case $cc_basename in cxch68*) # Green Hills C++ Compiler # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ;; esac ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; dgux*) case $cc_basename in ec++*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ;; ghcx*) # Green Hills C++ Compiler _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; *) ;; esac ;; freebsd* | dragonfly*) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' if test "$host_cpu" != ia64; then _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' fi ;; aCC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ;; esac ;; *) ;; esac ;; interix*) # This is c89, which is MS Visual C++ (no shared libs) # Anyone wants to do a port? ;; irix5* | irix6* | nonstopux*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' # CC pic flag -KPIC is the default. ;; *) ;; esac ;; linux* | k*bsd*-gnu | kopensolaris*-gnu) case $cc_basename in KCC*) # KAI C++ Compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; ecpc* ) # old Intel C++ for x86_64 which still supported -KPIC. 
_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; icpc* ) # Intel C++, used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; pgCC* | pgcpp*) # Portland Group C++ compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; cxx*) # Compaq C++ # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. _LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL 8.0, 9.0 on PPC and BlueGene _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; esac ;; esac ;; lynxos*) ;; m88k*) ;; mvs*) case $cc_basename in cxx*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' ;; *) ;; esac ;; netbsd*) ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' ;; RCC*) # Rational C++ 2.4.1 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; cxx*) # Digital/Compaq C++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. 
_LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; *) ;; esac ;; psos*) ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; gcx*) # Green Hills C++ Compiler _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' ;; *) ;; esac ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; lcc*) # Lucid _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' ;; *) ;; esac ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) case $cc_basename in CC*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' ;; *) ;; esac ;; vxworks*) ;; *) _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac fi ], [ if test "$GCC" = yes; then _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' case $host_os in aix*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. 
;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. _LT_TAGVAR(lt_prog_compiler_static, $1)= ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac ;; interix[[3-9]]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; msdosdjgpp*) # Just because we use GCC doesn't mean we suddenly get shared libraries # on systems that don't support them. _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no enable_shared=no ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic fi ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' ;; esac case $cc_basename in nvcc*) # Cuda Compiler Driver 2.2 _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker ' if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)" fi ;; esac else # PORTME Check for flag to pass linker flags through the system compiler. 
case $host_os in aix*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' else _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' fi ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). m4_if([$1], [GCJ], [], [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; hpux9* | hpux10* | hpux11*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but # not for PA HP-UX. case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' ;; esac # Is there a better lt_prog_compiler_static that works with the bundled CC? _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' ;; irix5* | irix6* | nonstopux*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # PIC (with -KPIC) is the default. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; linux* | k*bsd*-gnu | kopensolaris*-gnu) case $cc_basename in # old Intel for x86_64 which still supported -KPIC. ecc*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; # icc used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. icc* | ifort*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; # Lahey Fortran 8.1. 
lf95*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' ;; nagfor*) # NAG Fortran compiler _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group compilers (*not* the Pentium gcc compiler, # which looks to be a dead project) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; ccc*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # All Alpha code is PIC. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; xl* | bgxl* | bgf* | mpixl*) # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*) # Sun Fortran 8.3 passes all unrecognized flags to the linker _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='' ;; *Sun\ F* | *Sun*Fortran*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' ;; *Sun\ C*) # Sun C 5.9 _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' ;; *Intel*\ [[CF]]*Compiler*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; *Portland\ Group*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; esac ;; esac ;; newsos6) 
_LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' ;; osf3* | osf4* | osf5*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' # All OSF/1 code is PIC. _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; rdos*) _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' ;; solaris*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' case $cc_basename in f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; *) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; esac ;; sunos4*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; sysv4 | sysv4.2uw2* | sysv4.3*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec ;then _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' fi ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; unicos*) _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; uts4*) _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ;; *) _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no ;; esac fi ]) case $host_os in # For platforms which do not support PIC, -DPIC is meaningless: *djgpp*) _LT_TAGVAR(lt_prog_compiler_pic, $1)= ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" ;; esac 
AC_CACHE_CHECK([for $compiler option to produce PIC], [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) _LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) # # Check to make sure the PIC flag actually works. # if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in "" | " "*) ;; *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; esac], [_LT_TAGVAR(lt_prog_compiler_pic, $1)= _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) fi _LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], [Additional compiler flags for building library objects]) _LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], [How to pass a linker flag through the compiler]) # # Check to make sure the static flag actually works. # wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" _LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), $lt_tmp_static_flag, [], [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) _LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], [Compiler flag to prevent dynamic linking]) ])# _LT_COMPILER_PIC # _LT_LINKER_SHLIBS([TAGNAME]) # ---------------------------- # See if the linker supports building shared libraries. 
m4_defun([_LT_LINKER_SHLIBS], [AC_REQUIRE([LT_PATH_LD])dnl AC_REQUIRE([LT_PATH_NM])dnl m4_require([_LT_PATH_MANIFEST_TOOL])dnl m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_DECL_SED])dnl m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl m4_require([_LT_TAG_COMPILER])dnl AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) m4_if([$1], [CXX], [ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] case $host_os in aix[[4-9]]*) # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to AIX nm, but means don't demangle with GNU nm # Also, AIX nm treats weak defined symbols like other global defined # symbols, whereas GNU nm marks them as "W". if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi ;; pw32*) _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" ;; cygwin* | mingw* | cegcc*) case $cc_basename in cl*) _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' ;; *) _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(exclude_expsyms, 
$1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] ;; esac ;; *) _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac ], [ runpath_var= _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_cmds, $1)= _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(compiler_needs_object, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(old_archive_from_new_cmds, $1)= _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= _LT_TAGVAR(thread_safe_flag_spec, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= # include_expsyms should be a list of space-separated symbols to be *always* # included in the symbol list _LT_TAGVAR(include_expsyms, $1)= # exclude_expsyms can be an extended regexp of symbols to exclude # it will be wrapped by ` (' and `)$', so one must not match beginning or # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', # as well as any symbol that contains `d'. _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. 
Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. # Exclude shared library initialization/finalization symbols. dnl Note also adjust exclude_expsyms for C++ above. extract_expsyms_cmds= case $host_os in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test "$GCC" != yes; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd*) with_gnu_ld=no ;; esac _LT_TAGVAR(ld_shlibs, $1)=yes # On some targets, GNU ld is compatible enough with the native linker # that we're better off using the native interface for both. lt_use_gnu_ld_interface=no if test "$with_gnu_ld" = yes; then case $host_os in aix*) # The AIX port of GNU ld has always aspired to compatibility # with the native linker. However, as the warning in the GNU ld # block says, versions before 2.19.5* couldn't really create working # shared libraries, regardless of the interface used. case `$LD -v 2>&1` in *\ \(GNU\ Binutils\)\ 2.19.5*) ;; *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;; *\ \(GNU\ Binutils\)\ [[3-9]]*) ;; *) lt_use_gnu_ld_interface=yes ;; esac ;; *) lt_use_gnu_ld_interface=yes ;; esac fi if test "$lt_use_gnu_ld_interface" = yes; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='${wl}' # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. runpath_var=LD_RUN_PATH _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' # ancient GNU ld didn't support --whole-archive et. al. 
if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else _LT_TAGVAR(whole_archive_flag_spec, $1)= fi supports_anon_versioning=no case `$LD -v 2>&1` in *GNU\ gold*) supports_anon_versioning=yes ;; *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... *\ 2.11.*) ;; # other 2.11 versions *) supports_anon_versioning=yes ;; esac # See if GNU ld supports shared libraries. case $host_os in aix[[3-9]]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.19, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to install binutils *** 2.20 or above, or modify your PATH so that a non-GNU linker is found. *** You will then need to restart the configuration process. 
_LT_EOF fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='' ;; m68k) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, # as there is no search path for DLLs. 
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; haiku*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(link_all_deplibs, $1)=yes ;; interix[[3-9]]*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. 
# Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) tmp_diet=no if test "$host_os" = linux-dietlibc; then case $cc_basename in diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) esac fi if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ && test "$tmp_diet" = no then tmp_addflag=' $pic_flag' tmp_sharedflag='-shared' case $cc_basename,$host_cpu in pgcc*) # Portland Group C compiler _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag' ;; pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group f77 and f90 compilers _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' tmp_addflag=' $pic_flag -Mnomain' ;; ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 tmp_addflag=' -i_dynamic' ;; efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler 
on ia64 tmp_addflag=' -i_dynamic -nofor_main' ;; ifc* | ifort*) # Intel Fortran compiler tmp_addflag=' -nofor_main' ;; lf95*) # Lahey Fortran 8.1 _LT_TAGVAR(whole_archive_flag_spec, $1)= tmp_sharedflag='--shared' ;; xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) tmp_sharedflag='-qmkshrobj' tmp_addflag= ;; nvcc*) # Cuda Compiler Driver 2.2 _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes ;; esac case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes tmp_sharedflag='-G' ;; *Sun\ F*) # Sun Fortran 8.3 tmp_sharedflag='-G' ;; esac _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi case $cc_basename in xlf* | bgf* | bgxlf* | mpixlf*) # IBM XL Fortran 10.1 on PPC cannot create shared libs itself _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname 
-o $lib' if test "x$supports_anon_versioning" = xyes; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' fi ;; esac else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' wlarc= else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris*) if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. 
_LT_EOF elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) _LT_TAGVAR(ld_shlibs, $1)=no cat <<_LT_EOF 1>&2 *** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not *** reliably create shared libraries on SCO systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.16.91.0.3 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF ;; *) # For security reasons, it is highly recommended that you always # use absolute paths for naming shared libraries, and exclude the # DT_RUNPATH tag from executables and libraries. But doing so # requires that you compile everything twice, which is a pain. 
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; sunos4*) _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then runpath_var= _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. _LT_TAGVAR(hardcode_minus_L, $1)=yes if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. 
_LT_TAGVAR(hardcode_direct, $1)=unsupported fi ;; aix[[4-9]]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to AIX nm, but means don't demangle with GNU nm # Also, AIX nm treats weak defined symbols like other global # defined symbols, whereas GNU nm marks them as "W". if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' else _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' fi aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes break fi done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
_LT_TAGVAR(archive_cmds, $1)='' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' if test "$GCC" = yes; then case $host_os in aix4.[[012]]|aix4.[[012]].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 _LT_TAGVAR(hardcode_direct, $1)=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)= fi ;; esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to export. _LT_TAGVAR(always_export_symbols, $1)=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. _LT_TAGVAR(allow_undefined_flag, $1)='-berok' # Determine the default libpath from the value encoded in an # empty executable. 
_LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' if test "$with_gnu_ld" = yes; then # We only use this code for GNU lds that support --whole-archive. _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' fi _LT_TAGVAR(archive_cmds_need_lc, $1)=yes # This is similar to how AIX traditionally builds its shared libraries. 
_LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='' ;; m68k) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac ;; bsdi[[45]]*) _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. case $cc_basename in cl*) # Native MSVC _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(file_list_spec, $1)='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. 
_LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; else sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' # Don't use ranlib _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile="$lt_outputfile.exe" lt_tool_outputfile="$lt_tool_outputfile.exe" ;; esac~ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # Assume MSVC wrapper _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. 
_LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' # FIXME: Should let the user specify the lib program. _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ;; esac ;; darwin* | rhapsody*) _LT_DARWIN_LINKER_FEATURES($1) ;; dgux*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). freebsd2.2*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2.*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
freebsd* | dragonfly*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; hpux9*) if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_direct, $1)=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' ;; hpux10*) if test "$GCC" = yes && test "$with_gnu_ld" = no; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi if test "$with_gnu_ld" = no; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. 
_LT_TAGVAR(hardcode_minus_L, $1)=yes fi ;; hpux11*) if test "$GCC" = yes && test "$with_gnu_ld" = no; then case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) m4_if($1, [], [ # Older versions of the 11.00 compiler do not understand -b yet # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) _LT_LINKER_OPTION([if $CC understands -b], _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b], [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'], [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])], [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags']) ;; esac fi if test "$with_gnu_ld" = no; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: case $host_cpu in hppa*64*|ia64*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. 
_LT_TAGVAR(hardcode_minus_L, $1)=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. # This should be the same for all languages, so no per-tag cache variable. AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], [lt_cv_irix_exported_symbol], [save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" AC_LINK_IFELSE( [AC_LANG_SOURCE( [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], [C++], [[int foo (void) { return 0; }]], [Fortran 77], [[ subroutine foo end]], [Fortran], [[ subroutine foo end]])])], [lt_cv_irix_exported_symbol=yes], [lt_cv_irix_exported_symbol=no]) LDFLAGS="$save_LDFLAGS"]) if test "$lt_cv_irix_exported_symbol" = yes; then _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' fi else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' 
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(inherit_rpath, $1)=yes _LT_TAGVAR(link_all_deplibs, $1)=yes ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; newsos6) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *nto* | *qnx*) ;; openbsd*) if test -f /usr/libexec/ld.so; then _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=yes if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' else case $host_os in openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' ;; esac fi else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; os2*) 
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' ;; osf3*) if test "$GCC" = yes; then _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: ;; osf4* | osf5*) # as osf3* with the addition of -msym flag if test "$GCC" = yes; then _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' 
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' else _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' # Both c and cxx compiler support -rpath directly _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' fi _LT_TAGVAR(archive_cmds_need_lc, $1)='no' _LT_TAGVAR(hardcode_libdir_separator, $1)=: ;; solaris*) _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' if test "$GCC" = yes; then wlarc='${wl}' _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' else case `$CC -V 2>&1` in *"Compilers 5.0"*) wlarc='' _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ;; *) wlarc='${wl}' _LT_TAGVAR(archive_cmds, $1)='$CC 
-G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ;; esac fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. GCC discards it without `$wl', # but is careful enough not to reorder. # Supported since Solaris 2.6 (maybe 2.5.1?) if test "$GCC" = yes; then _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' else _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' fi ;; esac _LT_TAGVAR(link_all_deplibs, $1)=yes ;; sunos4*) if test "x$host_vendor" = xsequent; then # Use $CC to link under sequent, because it throws in some extra .o # files that make .init and .fini sections work. _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; sysv4) case $host_vendor in sni) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? ;; siemens) ## LD is ld it makes a PLAMLIB ## CC just makes a GrossModule. 
_LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' _LT_TAGVAR(hardcode_direct, $1)=no ;; motorola) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie ;; esac runpath_var='LD_RUN_PATH' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; sysv4.3*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' ;; sysv4*MP*) if test -d /usr/nec; then _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes _LT_TAGVAR(ld_shlibs, $1)=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. 
If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' runpath_var='LD_RUN_PATH' if test "$GCC" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; uts4*) _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(ld_shlibs, $1)=no ;; esac if test x$host_vendor = xsni; then case $host in sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym' ;; esac fi fi ]) AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no _LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld _LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl _LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl _LT_DECL([], [extract_expsyms_cmds], [2], [The commands to extract the exported symbol list from a shared archive]) # # Do we need to explicitly link libc? 
# case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in x|xyes) # Assume -lc should be added _LT_TAGVAR(archive_cmds_need_lc, $1)=yes if test "$enable_shared" = yes && test "$GCC" = yes; then case $_LT_TAGVAR(archive_cmds, $1) in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. AC_CACHE_CHECK([whether -lc should be explicitly linked in], [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1), [$RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if AC_TRY_EVAL(ac_compile) 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) compiler_flags=-v linker_flags=-v verstring= output_objdir=. libname=conftest lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) _LT_TAGVAR(allow_undefined_flag, $1)= if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) then lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no else lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes fi _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* ]) _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1) ;; esac fi ;; esac _LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], [Whether or not to add -lc for building shared libraries]) _LT_TAGDECL([allow_libtool_libs_with_static_runtimes], [enable_shared_with_static_runtimes], [0], [Whether or not to disallow shared libs when runtime libs are static]) _LT_TAGDECL([], [export_dynamic_flag_spec], [1], [Compiler flag to allow reflexive dlopens]) _LT_TAGDECL([], [whole_archive_flag_spec], [1], [Compiler flag to generate shared objects directly from archives]) _LT_TAGDECL([], [compiler_needs_object], [1], 
[Whether the compiler copes with passing no objects directly]) _LT_TAGDECL([], [old_archive_from_new_cmds], [2], [Create an old-style archive from a shared archive]) _LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], [Create a temporary old-style archive to link instead of a shared archive]) _LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) _LT_TAGDECL([], [archive_expsym_cmds], [2]) _LT_TAGDECL([], [module_cmds], [2], [Commands used to build a loadable module if different from building a shared archive.]) _LT_TAGDECL([], [module_expsym_cmds], [2]) _LT_TAGDECL([], [with_gnu_ld], [1], [Whether we are building with GNU ld or not]) _LT_TAGDECL([], [allow_undefined_flag], [1], [Flag that allows shared libraries with undefined symbols to be built]) _LT_TAGDECL([], [no_undefined_flag], [1], [Flag that enforces no undefined symbols]) _LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], [Flag to hardcode $libdir into a binary during linking. This must work even if $libdir does not exist]) _LT_TAGDECL([], [hardcode_libdir_separator], [1], [Whether we need a single "-rpath" flag with a separated argument]) _LT_TAGDECL([], [hardcode_direct], [0], [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_direct_absolute], [0], [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes DIR into the resulting binary and the resulting library dependency is "absolute", i.e impossible to change by setting ${shlibpath_var} if the library is relocated]) _LT_TAGDECL([], [hardcode_minus_L], [0], [Set to "yes" if using the -LDIR flag during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_shlibpath_var], [0], [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR into the resulting binary]) _LT_TAGDECL([], [hardcode_automatic], [0], [Set to "yes" if building a shared library automatically hardcodes DIR into the library and all 
    subsequent libraries and executables linked against it])
_LT_TAGDECL([], [inherit_rpath], [0],
    [Set to yes if linker adds runtime paths of dependent libraries to
    runtime path list])
_LT_TAGDECL([], [link_all_deplibs], [0],
    [Whether libtool must link a program against all its dependency libraries])
_LT_TAGDECL([], [always_export_symbols], [0],
    [Set to "yes" if exported symbols are required])
_LT_TAGDECL([], [export_symbols_cmds], [2],
    [The commands to list exported symbols])
_LT_TAGDECL([], [exclude_expsyms], [1],
    [Symbols that should not be listed in the preloaded symbols])
_LT_TAGDECL([], [include_expsyms], [1],
    [Symbols that must always be exported])
_LT_TAGDECL([], [prelink_cmds], [2],
    [Commands necessary for linking programs (against libraries) with templates])
_LT_TAGDECL([], [postlink_cmds], [2],
    [Commands necessary for finishing linking programs])
_LT_TAGDECL([], [file_list_spec], [1],
    [Specify filename containing input files])
dnl FIXME: Not yet implemented
dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1],
dnl    [Compiler flag to generate thread safe objects])
])# _LT_LINKER_SHLIBS


# _LT_LANG_C_CONFIG([TAG])
# ------------------------
# Ensure that the configuration variables for a C compiler are suitably
# defined.  These variables are subsequently used by _LT_CONFIG to write
# the compiler configuration to `libtool'.
#
# NOTE(review): this is generated libtool code (vendored via aclocal); any
# hand edit here will be overwritten by the next `autoreconf` run — the
# authoritative source is the libtool m4 macro files.
m4_defun([_LT_LANG_C_CONFIG],
[m4_require([_LT_DECL_EGREP])dnl
# Remember the caller's compiler so it can be restored after the C-tag
# probing below temporarily repurposes $CC.
lt_save_CC="$CC"
AC_LANG_PUSH(C)

# Source file extension for C test sources.
ac_ext=c

# Object file extension for compiled C test sources.
objext=o
_LT_TAGVAR(objext, $1)=$objext

# Code to be used in simple compile tests
lt_simple_compile_test_code="int some_variable = 0;"

# Code to be used in simple link tests
lt_simple_link_test_code='int main(){return(0);}'

_LT_TAG_COMPILER
# Save the default compiler, since it gets overwritten when the other
# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP.
compiler_DEFAULT=$CC

# save warnings/boilerplate of simple test code
_LT_COMPILER_BOILERPLATE
_LT_LINKER_BOILERPLATE

## CAVEAT EMPTOR:
## There is no encapsulation within the following macros, do not change
## the running order or otherwise move them around unless you know exactly
## what you are doing...
if test -n "$compiler"; then
  _LT_COMPILER_NO_RTTI($1)
  _LT_COMPILER_PIC($1)
  _LT_COMPILER_C_O($1)
  _LT_COMPILER_FILE_LOCKS($1)
  _LT_LINKER_SHLIBS($1)
  _LT_SYS_DYNAMIC_LINKER($1)
  _LT_LINKER_HARDCODE_LIBPATH($1)
  LT_SYS_DLOPEN_SELF
  _LT_CMD_STRIPLIB

  # Report which library types will actually be built
  AC_MSG_CHECKING([if libtool supports shared libraries])
  AC_MSG_RESULT([$can_build_shared])

  AC_MSG_CHECKING([whether to build shared libraries])
  test "$can_build_shared" = "no" && enable_shared=no

  # On AIX, shared libraries and static libraries use the same namespace, and
  # are all built from PIC.
  case $host_os in
  aix3*)
    # AIX 3: shared and static cannot coexist; re-run ranlib after install.
    test "$enable_shared" = yes && enable_static=no
    if test -n "$RANLIB"; then
      archive_cmds="$archive_cmds~\$RANLIB \$lib"
      postinstall_cmds='$RANLIB $lib'
    fi
    ;;

  aix[[4-9]]*)
    # AIX 4-9 without runtime linking (and non-ia64): same restriction.
    if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
      test "$enable_shared" = yes && enable_static=no
    fi
    ;;
  esac
  AC_MSG_RESULT([$enable_shared])

  AC_MSG_CHECKING([whether to build static libraries])
  # Make sure either enable_shared or enable_static is yes.
  test "$enable_shared" = yes || enable_static=yes
  AC_MSG_RESULT([$enable_static])

  _LT_CONFIG($1)
fi
AC_LANG_POP
# Restore the compiler saved at entry.
CC="$lt_save_CC"
])# _LT_LANG_C_CONFIG


# _LT_LANG_CXX_CONFIG([TAG])
# --------------------------
# Ensure that the configuration variables for a C++ compiler are suitably
# defined.  These variables are subsequently used by _LT_CONFIG to write
# the compiler configuration to `libtool'.
m4_defun([_LT_LANG_CXX_CONFIG], [m4_require([_LT_FILEUTILS_DEFAULTS])dnl m4_require([_LT_DECL_EGREP])dnl m4_require([_LT_PATH_MANIFEST_TOOL])dnl if test -n "$CXX" && ( test "X$CXX" != "Xno" && ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || (test "X$CXX" != "Xg++"))) ; then AC_PROG_CXXCPP else _lt_caught_CXX_error=yes fi AC_LANG_PUSH(C++) _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(compiler_needs_object, $1)=no _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for C++ test sources. ac_ext=cpp # Object file extension for compiled C++ test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the CXX compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. 
if test "$_lt_caught_CXX_error" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_LD=$LD lt_save_GCC=$GCC GCC=$GXX lt_save_with_gnu_ld=$with_gnu_ld lt_save_path_LD=$lt_cv_path_LD if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx else $as_unset lt_cv_prog_gnu_ld fi if test -n "${lt_cv_path_LDCXX+set}"; then lt_cv_path_LD=$lt_cv_path_LDCXX else $as_unset lt_cv_path_LD fi test -z "${LDCXX+set}" || LD=$LDCXX CC=${CXX-"c++"} CFLAGS=$CXXFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) if test -n "$compiler"; then # We don't want -fno-exception when compiling C++ code, so set the # no_builtin_flag separately if test "$GXX" = yes; then _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' else _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= fi if test "$GXX" = yes; then # Set up default GNU C++ configuration LT_PATH_LD # Check if GNU C++ uses GNU ld as the underlying linker, since the # archiving commands below assume that GNU ld is being used. 
if test "$with_gnu_ld" = yes; then _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' # If archive_cmds runs LD, not CC, wlarc should be empty # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to # investigate it a little bit more. (MM) wlarc='${wl}' # ancient GNU ld didn't support --whole-archive et. al. if eval "`$CC -print-prog-name=ld` --help 2>&1" | $GREP 'no-whole-archive' > /dev/null; then _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else _LT_TAGVAR(whole_archive_flag_spec, $1)= fi else with_gnu_ld=no wlarc= # A generic and very simple default shared library creation # command for GNU C++ for the case where it uses the native # linker, instead of GNU ld. If possible, this setting should # overridden to take advantage of the native linker features on # the platform it is being used on. _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else GXX=no with_gnu_ld=no wlarc= fi # PORTME: fill in a description of your system's C++ link characteristics AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) _LT_TAGVAR(ld_shlibs, $1)=yes case $host_os in aix3*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aix[[4-9]]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag="" else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) aix_use_runtimelinking=yes break ;; esac done ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
_LT_TAGVAR(archive_cmds, $1)='' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' if test "$GXX" = yes; then case $host_os in aix4.[[012]]|aix4.[[012]].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 _LT_TAGVAR(hardcode_direct, $1)=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking _LT_TAGVAR(hardcode_minus_L, $1)=yes _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)= fi esac shared_flag='-shared' if test "$aix_use_runtimelinking" = yes; then shared_flag="$shared_flag "'${wl}-G' fi else # not using gcc if test "$host_cpu" = ia64; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test "$aix_use_runtimelinking" = yes; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi fi _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to # export. _LT_TAGVAR(always_export_symbols, $1)=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. _LT_TAGVAR(allow_undefined_flag, $1)='-berok' # Determine the default libpath from the value encoded in an empty # executable. 
_LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" else if test "$host_cpu" = ia64; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. _LT_SYS_MODULE_PATH_AIX([$1]) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' if test "$with_gnu_ld" = yes; then # We only use this code for GNU lds that support --whole-archive. _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' fi _LT_TAGVAR(archive_cmds_need_lc, $1)=yes # This is similar to how AIX traditionally builds its shared # libraries. 
_LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then _LT_TAGVAR(allow_undefined_flag, $1)=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; chorus*) case $cc_basename in *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; cygwin* | mingw* | pw32* | cegcc*) case $GXX,$cc_basename in ,cl* | no,cl*) # Native MSVC # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=yes _LT_TAGVAR(file_list_spec, $1)='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=".dll" # FIXME: Setting linknames here is a bad hack. _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; else $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. 
# _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes # Don't use ranlib _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile="$lt_outputfile.exe" lt_tool_outputfile="$lt_tool_outputfile.exe" ;; esac~ func_to_tool_file "$lt_outputfile"~ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # g++ # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, # as there is no search path for DLLs. _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' _LT_TAGVAR(allow_undefined_flag, $1)=unsupported _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is; otherwise, prepend... 
_LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; darwin* | rhapsody*) _LT_DARWIN_LINKER_FEATURES($1) ;; dgux*) case $cc_basename in ec++*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; ghcx*) # Green Hills C++ Compiler # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; freebsd2.*) # C++ shared libraries reported to be fairly broken before # switch to ELF _LT_TAGVAR(ld_shlibs, $1)=no ;; freebsd-elf*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; freebsd* | dragonfly*) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions _LT_TAGVAR(ld_shlibs, $1)=yes ;; gnu*) ;; haiku*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(link_all_deplibs, $1)=yes ;; hpux9*) _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, # but as the default # location of the library. 
case $cc_basename in CC*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aCC*) _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test "$GXX" = yes; then _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; hpux10*|hpux11*) if test $with_gnu_ld = no; then _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: case $host_cpu in hppa*64*|ia64*) ;; *) _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' ;; esac fi case $host_cpu in hppa*64*|ia64*) _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no ;; *) _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, # but as the default # location of the library. 
;; esac case $cc_basename in CC*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; aCC*) case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test "$GXX" = yes; then if test $with_gnu_ld = no; then case $host_cpu in hppa*64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac fi else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; interix[[3-9]]*) 
_LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; irix5* | irix6*) case $cc_basename in CC*) # SGI C++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' # Archives containing C++ object files must be created using # "CC -ar", where "CC" is the IRIX C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
_LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' ;; *) if test "$GXX" = yes; then if test "$with_gnu_ld" = no; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' else _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' fi fi _LT_TAGVAR(link_all_deplibs, $1)=yes ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: _LT_TAGVAR(inherit_rpath, $1)=yes ;; linux* | k*bsd*-gnu | kopensolaris*-gnu) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
# # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' # Archives containing C++ object files must be created using # "CC -Bstatic", where "CC" is the KAI C++ compiler. _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; icpc* | ecpc* ) # Intel C++ with_gnu_ld=yes # version 8.0 and above of icpc choke on multiply defined symbols # if we add $predep_objects and $postdep_objects, however 7.1 and # earlier do not add the objects themselves. 
case `$CC -V 2>&1` in *"Version 7."*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 8.0 or newer tmp_idyn= case $host_cpu in ia64*) tmp_idyn=' -i_dynamic';; esac _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ;; esac _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' ;; pgCC* | pgcpp*) # Portland Group C++ compiler case `$CC -V` in *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*) _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ $RANLIB $oldlib' _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, 
$1)='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; *) # Version 6 and above use weak symbols _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' ;; cxx*) # Compaq C++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' runpath_var=LD_RUN_PATH _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. 
output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' ;; xl* | mpixl* | bgxl*) # IBM XL 8.0 on PPC, with GNU ld _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' if test "x$supports_anon_versioning" = xyes; then _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' fi ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' _LT_TAGVAR(compiler_needs_object, $1)=yes # Not sure whether something based on # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 # would be better. 
output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; esac ;; esac ;; lynxos*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; m88k*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; mvs*) case $cc_basename in cxx*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' wlarc= _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no fi # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; *nto* | *qnx*) _LT_TAGVAR(ld_shlibs, $1)=yes ;; openbsd2*) # C++ shared libraries are fairly broken _LT_TAGVAR(ld_shlibs, $1)=no ;; openbsd*) if test -f /usr/libexec/ld.so; then _LT_TAGVAR(hardcode_direct, $1)=yes _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=yes _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' 
_LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' fi output_verbose_link_cmd=func_echo_all else _LT_TAGVAR(ld_shlibs, $1)=no fi ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Archives containing C++ object files must be created using # the KAI C++ compiler. case $host in osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; esac ;; RCC*) # Rational C++ 2.4.1 # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; cxx*) case $host in osf3*) _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' ;; *) _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` 
-update_registry ${output_objdir}/so_locations -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ echo "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~ $RM $lib.exp' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' ;; esac _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test "$GXX" = yes && test "$with_gnu_ld" = no; then _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' case $host in osf3*) _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && 
func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ;; esac _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no fi ;; esac ;; psos*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; lcc*) # Lucid # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ _LT_TAGVAR(archive_cmds_need_lc,$1)=yes _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' _LT_TAGVAR(hardcode_shlibpath_var, $1)=no case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands `-z linker_flag'. # Supported since Solaris 2.6 (maybe 2.5.1?) 
_LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' ;; esac _LT_TAGVAR(link_all_deplibs, $1)=yes output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' ;; gcx*) # Green Hills C++ Compiler _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' # The C++ compiler must be used to create the archive. _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; *) # GNU C++ compiler with Solaris linker if test "$GXX" = yes && test "$with_gnu_ld" = no; then _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' if $CC --version | $GREP -v '^2\.7' > /dev/null; then _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # g++ 2.7 appears to require `-G' NOT `-shared' on this # platform. 
_LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' fi _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' case $host_os in solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; *) _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' ;; esac fi ;; esac ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no runpath_var='LD_RUN_PATH' case $cc_basename in CC*) _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We can NOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. 
If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(hardcode_shlibpath_var, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' _LT_TAGVAR(hardcode_libdir_separator, $1)=':' _LT_TAGVAR(link_all_deplibs, $1)=yes _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' runpath_var='LD_RUN_PATH' case $cc_basename in CC*) _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~ '"$_LT_TAGVAR(old_archive_cmds, $1)" _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~ '"$_LT_TAGVAR(reload_cmds, $1)" ;; *) _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac ;; vxworks*) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; *) # FIXME: insert proper C++ library support _LT_TAGVAR(ld_shlibs, $1)=no ;; esac AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no _LT_TAGVAR(GCC, $1)="$GXX" _LT_TAGVAR(LD, $1)="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... 
_LT_SYS_HIDDEN_LIBDEPS($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS LDCXX=$LD LD=$lt_save_LD GCC=$lt_save_GCC with_gnu_ld=$lt_save_with_gnu_ld lt_cv_path_LDCXX=$lt_cv_path_LD lt_cv_path_LD=$lt_save_path_LD lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld fi # test "$_lt_caught_CXX_error" != yes AC_LANG_POP ])# _LT_LANG_CXX_CONFIG # _LT_FUNC_STRIPNAME_CNF # ---------------------- # func_stripname_cnf prefix suffix name # strip PREFIX and SUFFIX off of NAME. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). # # This function is identical to the (non-XSI) version of func_stripname, # except this one can be used by m4 code that may be executed by configure, # rather than the libtool script. m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl AC_REQUIRE([_LT_DECL_SED]) AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH]) func_stripname_cnf () { case ${2} in .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; esac } # func_stripname_cnf ])# _LT_FUNC_STRIPNAME_CNF # _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) # --------------------------------- # Figure out "hidden" library dependencies from verbose # compiler output when linking a shared library. # Parse the compiler output and extract the necessary # objects, libraries and library flags. 
m4_defun([_LT_SYS_HIDDEN_LIBDEPS],
[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl
# Dependencies to place before and after the object being linked:
_LT_TAGVAR(predep_objects, $1)=
_LT_TAGVAR(postdep_objects, $1)=
_LT_TAGVAR(predeps, $1)=
_LT_TAGVAR(postdeps, $1)=
_LT_TAGVAR(compiler_lib_search_path, $1)=

dnl we can't use the lt_simple_compile_test_code here,
dnl because it contains code intended for an executable,
dnl not a library.  It's possible we should let each
dnl tag define a new lt_????_link_test_code variable,
dnl but it's only used here...
m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF
int a;
void foo (void) { a = 0; }
_LT_EOF
], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF
class Foo
{
public:
  Foo (void) { a = 0; }
private:
  int a;
};
_LT_EOF
], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF
      subroutine foo
      implicit none
      integer*4 a
      a=0
      return
      end
_LT_EOF
], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF
      subroutine foo
      implicit none
      integer a
      a=0
      return
      end
_LT_EOF
], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF
public class foo {
  private int a;
  public void bar (void) {
    a = 0;
  }
};
_LT_EOF
], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF
package foo
func foo() {
}
_LT_EOF
])

# Link-time-optimization flags would hide the real libraries/objects in the
# verbose link output, so strip LTO from CFLAGS for this probe compile only
# (restored from _lt_libdeps_save_CFLAGS below).
_lt_libdeps_save_CFLAGS=$CFLAGS
case "$CC $CFLAGS " in #(
*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
esac

dnl Parse the compiler output and extract the necessary
dnl objects, libraries and library flags.
if AC_TRY_EVAL(ac_compile); then
  # Parse the compiler output and extract the necessary
  # objects, libraries and library flags.

  # Sentinel used to keep track of whether or not we are before
  # the conftest object file.
  pre_test_object_deps_done=no

  for p in `eval "$output_verbose_link_cmd"`; do
    case ${prev}${p} in

    -L* | -R* | -l*)
       # Some compilers place space between "-{L,R}" and the path.
       # Remove the space.
       if test $p = "-L" ||
          test $p = "-R"; then
         prev=$p
         continue
       fi

       # Expand the sysroot to ease extracting the directories later.
       if test -z "$prev"; then
         case $p in
         -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
         -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
         -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
         esac
       fi
       case $p in
       =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
       esac
       if test "$pre_test_object_deps_done" = no; then
         case ${prev} in
         -L | -R)
           # Internal compiler library paths should come after those
           # provided the user.  The postdeps already come after the
           # user supplied libs so there is no need to process them.
           if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then
             _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}"
           else
             _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}"
           fi
           ;;
         # The "-l" case would never come before the object being
         # linked, so don't bother handling this case.
         esac
       else
         if test -z "$_LT_TAGVAR(postdeps, $1)"; then
           _LT_TAGVAR(postdeps, $1)="${prev}${p}"
         else
           _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}"
         fi
       fi
       prev=
       ;;

    *.lto.$objext) ;; # Ignore GCC LTO objects
    *.$objext)
       # This assumes that the test object file only shows up
       # once in the compiler output.
       if test "$p" = "conftest.$objext"; then
         pre_test_object_deps_done=yes
         continue
       fi

       if test "$pre_test_object_deps_done" = no; then
         if test -z "$_LT_TAGVAR(predep_objects, $1)"; then
           _LT_TAGVAR(predep_objects, $1)="$p"
         else
           _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p"
         fi
       else
         if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then
           _LT_TAGVAR(postdep_objects, $1)="$p"
         else
           _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p"
         fi
       fi
       ;;

    *) ;; # Ignore the rest.

    esac
  done

  # Clean up.
  rm -f a.out a.exe
else
  echo "libtool.m4: error: problem compiling $1 test program"
fi

# Remove the probe object file.  This was misspelled "confest.$objext"
# (a known upstream libtool typo, fixed in later releases), which left
# conftest.$objext behind in the build directory.
$RM -f conftest.$objext
CFLAGS=$_lt_libdeps_save_CFLAGS

# PORTME: override above test on systems where it is broken
m4_if([$1], [CXX],
[case $host_os in
interix[[3-9]]*)
  # Interix 3.5 installs completely hosed .la files for C++, so rather than
  # hack all around it, let's just trust "g++" to DTRT.
  _LT_TAGVAR(predep_objects,$1)=
  _LT_TAGVAR(postdep_objects,$1)=
  _LT_TAGVAR(postdeps,$1)=
  ;;

linux*)
  case `$CC -V 2>&1 | sed 5q` in
  *Sun\ C*)
    # Sun C++ 5.9

    # The more standards-conforming stlport4 library is
    # incompatible with the Cstd library.  Avoid specifying
    # it if it's in CXXFLAGS. Ignore libCrun as
    # -library=stlport4 depends on it.
    case " $CXX $CXXFLAGS " in
    *" -library=stlport4 "*)
      solaris_use_stlport4=yes
      ;;
    esac

    if test "$solaris_use_stlport4" != yes; then
      _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun'
    fi
    ;;
  esac
  ;;

solaris*)
  case $cc_basename in
  CC* | sunCC*)
    # The more standards-conforming stlport4 library is
    # incompatible with the Cstd library.  Avoid specifying
    # it if it's in CXXFLAGS. Ignore libCrun as
    # -library=stlport4 depends on it.
    case " $CXX $CXXFLAGS " in
    *" -library=stlport4 "*)
      solaris_use_stlport4=yes
      ;;
    esac

    # Adding this requires a known-good setup of shared libraries for
    # Sun compiler versions before 5.6, else PIC objects from an old
    # archive will be linked into the output, leading to subtle bugs.
    if test "$solaris_use_stlport4" != yes; then
      _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun'
    fi
    ;;
  esac
  ;;
esac
])

case " $_LT_TAGVAR(postdeps, $1) " in
*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;;
esac
 _LT_TAGVAR(compiler_lib_search_dirs, $1)=
if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then
 _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L!
!g' -e 's!^ !!'` fi _LT_TAGDECL([], [compiler_lib_search_dirs], [1], [The directories searched by this compiler when creating a shared library]) _LT_TAGDECL([], [predep_objects], [1], [Dependencies to place before and after the objects being linked to create a shared library]) _LT_TAGDECL([], [postdep_objects], [1]) _LT_TAGDECL([], [predeps], [1]) _LT_TAGDECL([], [postdeps], [1]) _LT_TAGDECL([], [compiler_lib_search_path], [1], [The library search path used internally by the compiler when linking a shared library]) ])# _LT_SYS_HIDDEN_LIBDEPS # _LT_LANG_F77_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for a Fortran 77 compiler are # suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_F77_CONFIG], [AC_LANG_PUSH(Fortran 77) if test -z "$F77" || test "X$F77" = "Xno"; then _lt_disable_F77=yes fi _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for f77 test sources. ac_ext=f # Object file extension for compiled f77 test sources. 
objext=o
_LT_TAGVAR(objext, $1)=$objext

# No sense in running all these tests if we already determined that
# the F77 compiler isn't working.  Some variables (like enable_shared)
# are currently assumed to apply to all compilers on this platform,
# and will be corrupted by setting them based on a non-working compiler.
if test "$_lt_disable_F77" != yes; then
  # Code to be used in simple compile tests
  lt_simple_compile_test_code="\
      subroutine t
      return
      end
"

  # Code to be used in simple link tests
  lt_simple_link_test_code="\
      program t
      end
"

  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
  _LT_TAG_COMPILER

  # save warnings/boilerplate of simple test code
  _LT_COMPILER_BOILERPLATE
  _LT_LINKER_BOILERPLATE

  # Allow CC to be a program name with arguments.
  # Temporarily point CC/CFLAGS at the Fortran 77 compiler so the shared
  # libtool probes below exercise the F77 toolchain; the saved values are
  # restored after _LT_CONFIG runs.
  lt_save_CC="$CC"
  lt_save_GCC=$GCC
  lt_save_CFLAGS=$CFLAGS
  CC=${F77-"f77"}
  CFLAGS=$FFLAGS
  compiler=$CC
  _LT_TAGVAR(compiler, $1)=$CC
  _LT_CC_BASENAME([$compiler])
  # $G77 — presumably set by the earlier GNU-f77 compiler probe; verify
  # against the configure sections generated before this macro.
  GCC=$G77
  if test -n "$compiler"; then
    AC_MSG_CHECKING([if libtool supports shared libraries])
    AC_MSG_RESULT([$can_build_shared])

    AC_MSG_CHECKING([whether to build shared libraries])
    test "$can_build_shared" = "no" && enable_shared=no

    # On AIX, shared libraries and static libraries use the same namespace, and
    # are all built from PIC.
    case $host_os in
      aix3*)
        test "$enable_shared" = yes && enable_static=no
        if test -n "$RANLIB"; then
          archive_cmds="$archive_cmds~\$RANLIB \$lib"
          postinstall_cmds='$RANLIB $lib'
        fi
        ;;
      aix[[4-9]]*)
        if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
          test "$enable_shared" = yes && enable_static=no
        fi
        ;;
    esac
    AC_MSG_RESULT([$enable_shared])

    AC_MSG_CHECKING([whether to build static libraries])
    # Make sure either enable_shared or enable_static is yes.
test "$enable_shared" = yes || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_TAGVAR(GCC, $1)="$G77" _LT_TAGVAR(LD, $1)="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" GCC=$lt_save_GCC CC="$lt_save_CC" CFLAGS="$lt_save_CFLAGS" fi # test "$_lt_disable_F77" != yes AC_LANG_POP ])# _LT_LANG_F77_CONFIG # _LT_LANG_FC_CONFIG([TAG]) # ------------------------- # Ensure that the configuration variables for a Fortran compiler are # suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_FC_CONFIG], [AC_LANG_PUSH(Fortran) if test -z "$FC" || test "X$FC" = "Xno"; then _lt_disable_FC=yes fi _LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(allow_undefined_flag, $1)= _LT_TAGVAR(always_export_symbols, $1)=no _LT_TAGVAR(archive_expsym_cmds, $1)= _LT_TAGVAR(export_dynamic_flag_spec, $1)= _LT_TAGVAR(hardcode_direct, $1)=no _LT_TAGVAR(hardcode_direct_absolute, $1)=no _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= _LT_TAGVAR(hardcode_libdir_separator, $1)= _LT_TAGVAR(hardcode_minus_L, $1)=no _LT_TAGVAR(hardcode_automatic, $1)=no _LT_TAGVAR(inherit_rpath, $1)=no _LT_TAGVAR(module_cmds, $1)= _LT_TAGVAR(module_expsym_cmds, $1)= _LT_TAGVAR(link_all_deplibs, $1)=unknown _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds _LT_TAGVAR(no_undefined_flag, $1)= _LT_TAGVAR(whole_archive_flag_spec, $1)= _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no # Source file extension for fc test sources. ac_ext=${ac_fc_srcext-f} # Object file extension for compiled fc test sources. 
objext=o _LT_TAGVAR(objext, $1)=$objext # No sense in running all these tests if we already determined that # the FC compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. if test "$_lt_disable_FC" != yes; then # Code to be used in simple compile tests lt_simple_compile_test_code="\ subroutine t return end " # Code to be used in simple link tests lt_simple_link_test_code="\ program t end " # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC="$CC" lt_save_GCC=$GCC lt_save_CFLAGS=$CFLAGS CC=${FC-"f95"} CFLAGS=$FCFLAGS compiler=$CC GCC=$ac_cv_fc_compiler_gnu _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) if test -n "$compiler"; then AC_MSG_CHECKING([if libtool supports shared libraries]) AC_MSG_RESULT([$can_build_shared]) AC_MSG_CHECKING([whether to build shared libraries]) test "$can_build_shared" = "no" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. case $host_os in aix3*) test "$enable_shared" = yes && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[[4-9]]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi ;; esac AC_MSG_RESULT([$enable_shared]) AC_MSG_CHECKING([whether to build static libraries]) # Make sure either enable_shared or enable_static is yes. 
test "$enable_shared" = yes || enable_static=yes AC_MSG_RESULT([$enable_static]) _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu" _LT_TAGVAR(LD, $1)="$LD" ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... _LT_SYS_HIDDEN_LIBDEPS($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_SYS_DYNAMIC_LINKER($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi # test -n "$compiler" GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS fi # test "$_lt_disable_FC" != yes AC_LANG_POP ])# _LT_LANG_FC_CONFIG # _LT_LANG_GCJ_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for the GNU Java Compiler compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_GCJ_CONFIG], [AC_REQUIRE([LT_PROG_GCJ])dnl AC_LANG_SAVE # Source file extension for Java test sources. ac_ext=java # Object file extension for compiled Java test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="class foo {}" # Code to be used in simple link tests lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_GCC=$GCC GCC=yes CC=${GCJ-"gcj"} CFLAGS=$GCJFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_TAGVAR(LD, $1)="$LD" _LT_CC_BASENAME([$compiler]) # GCJ did not exist at the time GCC didn't implicitly link libc in. 
_LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi AC_LANG_RESTORE GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS ])# _LT_LANG_GCJ_CONFIG # _LT_LANG_GO_CONFIG([TAG]) # -------------------------- # Ensure that the configuration variables for the GNU Go compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_GO_CONFIG], [AC_REQUIRE([LT_PROG_GO])dnl AC_LANG_SAVE # Source file extension for Go test sources. ac_ext=go # Object file extension for compiled Go test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="package main; func main() { }" # Code to be used in simple link tests lt_simple_link_test_code='package main; func main() { }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_GCC=$GCC GCC=yes CC=${GOC-"gccgo"} CFLAGS=$GOFLAGS compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_TAGVAR(LD, $1)="$LD" _LT_CC_BASENAME([$compiler]) # Go did not exist at the time GCC didn't implicitly link libc in. 
_LT_TAGVAR(archive_cmds_need_lc, $1)=no _LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds _LT_TAGVAR(reload_flag, $1)=$reload_flag _LT_TAGVAR(reload_cmds, $1)=$reload_cmds ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... if test -n "$compiler"; then _LT_COMPILER_NO_RTTI($1) _LT_COMPILER_PIC($1) _LT_COMPILER_C_O($1) _LT_COMPILER_FILE_LOCKS($1) _LT_LINKER_SHLIBS($1) _LT_LINKER_HARDCODE_LIBPATH($1) _LT_CONFIG($1) fi AC_LANG_RESTORE GCC=$lt_save_GCC CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS ])# _LT_LANG_GO_CONFIG # _LT_LANG_RC_CONFIG([TAG]) # ------------------------- # Ensure that the configuration variables for the Windows resource compiler # are suitably defined. These variables are subsequently used by _LT_CONFIG # to write the compiler configuration to `libtool'. m4_defun([_LT_LANG_RC_CONFIG], [AC_REQUIRE([LT_PROG_RC])dnl AC_LANG_SAVE # Source file extension for RC test sources. ac_ext=rc # Object file extension for compiled RC test sources. objext=o _LT_TAGVAR(objext, $1)=$objext # Code to be used in simple compile tests lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' # Code to be used in simple link tests lt_simple_link_test_code="$lt_simple_compile_test_code" # ltmain only uses $CC for tagged configurations so make sure $CC is set. _LT_TAG_COMPILER # save warnings/boilerplate of simple test code _LT_COMPILER_BOILERPLATE _LT_LINKER_BOILERPLATE # Allow CC to be a program name with arguments. 
lt_save_CC="$CC" lt_save_CFLAGS=$CFLAGS lt_save_GCC=$GCC GCC= CC=${RC-"windres"} CFLAGS= compiler=$CC _LT_TAGVAR(compiler, $1)=$CC _LT_CC_BASENAME([$compiler]) _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes if test -n "$compiler"; then : _LT_CONFIG($1) fi GCC=$lt_save_GCC AC_LANG_RESTORE CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS ])# _LT_LANG_RC_CONFIG # LT_PROG_GCJ # ----------- AC_DEFUN([LT_PROG_GCJ], [m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], [AC_CHECK_TOOL(GCJ, gcj,) test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" AC_SUBST(GCJFLAGS)])])[]dnl ]) # Old name: AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_GCJ], []) # LT_PROG_GO # ---------- AC_DEFUN([LT_PROG_GO], [AC_CHECK_TOOL(GOC, gccgo,) ]) # LT_PROG_RC # ---------- AC_DEFUN([LT_PROG_RC], [AC_CHECK_TOOL(RC, windres,) ]) # Old name: AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_RC], []) # _LT_DECL_EGREP # -------------- # If we don't have a new enough Autoconf to choose the best grep # available, choose the one first in the user's PATH. m4_defun([_LT_DECL_EGREP], [AC_REQUIRE([AC_PROG_EGREP])dnl AC_REQUIRE([AC_PROG_FGREP])dnl test -z "$GREP" && GREP=grep _LT_DECL([], [GREP], [1], [A grep program that handles long lines]) _LT_DECL([], [EGREP], [1], [An ERE matcher]) _LT_DECL([], [FGREP], [1], [A literal string matcher]) dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too AC_SUBST([GREP]) ]) # _LT_DECL_OBJDUMP # -------------- # If we don't have a new enough Autoconf to choose the best objdump # available, choose the one first in the user's PATH. m4_defun([_LT_DECL_OBJDUMP], [AC_CHECK_TOOL(OBJDUMP, objdump, false) test -z "$OBJDUMP" && OBJDUMP=objdump _LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) AC_SUBST([OBJDUMP]) ]) # _LT_DECL_DLLTOOL # ---------------- # Ensure DLLTOOL variable is set. 
m4_defun([_LT_DECL_DLLTOOL], [AC_CHECK_TOOL(DLLTOOL, dlltool, false) test -z "$DLLTOOL" && DLLTOOL=dlltool _LT_DECL([], [DLLTOOL], [1], [DLL creation program]) AC_SUBST([DLLTOOL]) ]) # _LT_DECL_SED # ------------ # Check for a fully-functional sed program, that truncates # as few characters as possible. Prefer GNU sed if found. m4_defun([_LT_DECL_SED], [AC_PROG_SED test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" _LT_DECL([], [SED], [1], [A sed program that does not truncate output]) _LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], [Sed that helps us avoid accidentally triggering echo(1) options like -n]) ])# _LT_DECL_SED m4_ifndef([AC_PROG_SED], [ ############################################################ # NOTE: This macro has been submitted for inclusion into # # GNU Autoconf as AC_PROG_SED. When it is available in # # a released version of Autoconf we should remove this # # macro and use it instead. # ############################################################ m4_defun([AC_PROG_SED], [AC_MSG_CHECKING([for a sed that does not truncate output]) AC_CACHE_VAL(lt_cv_path_SED, [# Loop through the user's path and test for sed and gsed. # Then use that list of sed's as ones to test for truncation. as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for lt_ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" fi done done done IFS=$as_save_IFS lt_ac_max=0 lt_ac_count=0 # Add /usr/xpg4/bin/sed as it is typically found on Solaris # along with /bin/sed that truncates output. for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do test ! -f $lt_ac_sed && continue cat /dev/null > conftest.in lt_ac_count=0 echo $ECHO_N "0123456789$ECHO_C" >conftest.in # Check for GNU sed and select it if it is found. 
if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then lt_cv_path_SED=$lt_ac_sed break fi while true; do cat conftest.in conftest.in >conftest.tmp mv conftest.tmp conftest.in cp conftest.in conftest.nl echo >>conftest.nl $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break cmp -s conftest.out conftest.nl || break # 10000 chars as input seems more than enough test $lt_ac_count -gt 10 && break lt_ac_count=`expr $lt_ac_count + 1` if test $lt_ac_count -gt $lt_ac_max; then lt_ac_max=$lt_ac_count lt_cv_path_SED=$lt_ac_sed fi done done ]) SED=$lt_cv_path_SED AC_SUBST([SED]) AC_MSG_RESULT([$SED]) ])#AC_PROG_SED ])#m4_ifndef # Old name: AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) dnl aclocal-1.4 backwards compatibility: dnl AC_DEFUN([LT_AC_PROG_SED], []) # _LT_CHECK_SHELL_FEATURES # ------------------------ # Find out whether the shell is Bourne or XSI compatible, # or has some other useful features. m4_defun([_LT_CHECK_SHELL_FEATURES], [AC_MSG_CHECKING([whether the shell understands some XSI constructs]) # Try some XSI features xsi_shell=no ( _lt_dummy="a/b/c" test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ = c,a/b,b/c, \ && eval 'test $(( 1 + 1 )) -eq 2 \ && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ && xsi_shell=yes AC_MSG_RESULT([$xsi_shell]) _LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell']) AC_MSG_CHECKING([whether the shell understands "+="]) lt_shell_append=no ( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \ >/dev/null 2>&1 \ && lt_shell_append=yes AC_MSG_RESULT([$lt_shell_append]) _LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append']) if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false fi _LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl # test EBCDIC or ASCII case `echo X|tr X '\101'` in A) # ASCII based system # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr lt_SP2NL='tr \040 \012' lt_NL2SP='tr 
\015\012 \040\040' ;; *) # EBCDIC based system lt_SP2NL='tr \100 \n' lt_NL2SP='tr \r\n \100\100' ;; esac _LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl _LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl ])# _LT_CHECK_SHELL_FEATURES # _LT_PROG_FUNCTION_REPLACE (FUNCNAME, REPLACEMENT-BODY) # ------------------------------------------------------ # In `$cfgfile', look for function FUNCNAME delimited by `^FUNCNAME ()$' and # '^} FUNCNAME ', and replace its body with REPLACEMENT-BODY. m4_defun([_LT_PROG_FUNCTION_REPLACE], [dnl { sed -e '/^$1 ()$/,/^} # $1 /c\ $1 ()\ {\ m4_bpatsubsts([$2], [$], [\\], [^\([ ]\)], [\\\1]) } # Extended-shell $1 implementation' "$cfgfile" > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: ]) # _LT_PROG_REPLACE_SHELLFNS # ------------------------- # Replace existing portable implementations of several shell functions with # equivalent extended shell implementations where those features are available.. m4_defun([_LT_PROG_REPLACE_SHELLFNS], [if test x"$xsi_shell" = xyes; then _LT_PROG_FUNCTION_REPLACE([func_dirname], [dnl case ${1} in */*) func_dirname_result="${1%/*}${2}" ;; * ) func_dirname_result="${3}" ;; esac]) _LT_PROG_FUNCTION_REPLACE([func_basename], [dnl func_basename_result="${1##*/}"]) _LT_PROG_FUNCTION_REPLACE([func_dirname_and_basename], [dnl case ${1} in */*) func_dirname_result="${1%/*}${2}" ;; * ) func_dirname_result="${3}" ;; esac func_basename_result="${1##*/}"]) _LT_PROG_FUNCTION_REPLACE([func_stripname], [dnl # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are # positional parameters, so assign one to ordinary parameter first. 
func_stripname_result=${3} func_stripname_result=${func_stripname_result#"${1}"} func_stripname_result=${func_stripname_result%"${2}"}]) _LT_PROG_FUNCTION_REPLACE([func_split_long_opt], [dnl func_split_long_opt_name=${1%%=*} func_split_long_opt_arg=${1#*=}]) _LT_PROG_FUNCTION_REPLACE([func_split_short_opt], [dnl func_split_short_opt_arg=${1#??} func_split_short_opt_name=${1%"$func_split_short_opt_arg"}]) _LT_PROG_FUNCTION_REPLACE([func_lo2o], [dnl case ${1} in *.lo) func_lo2o_result=${1%.lo}.${objext} ;; *) func_lo2o_result=${1} ;; esac]) _LT_PROG_FUNCTION_REPLACE([func_xform], [ func_xform_result=${1%.*}.lo]) _LT_PROG_FUNCTION_REPLACE([func_arith], [ func_arith_result=$(( $[*] ))]) _LT_PROG_FUNCTION_REPLACE([func_len], [ func_len_result=${#1}]) fi if test x"$lt_shell_append" = xyes; then _LT_PROG_FUNCTION_REPLACE([func_append], [ eval "${1}+=\\${2}"]) _LT_PROG_FUNCTION_REPLACE([func_append_quoted], [dnl func_quote_for_eval "${2}" dnl m4 expansion turns \\\\ into \\, and then the shell eval turns that into \ eval "${1}+=\\\\ \\$func_quote_for_eval_result"]) # Save a `func_append' function call where possible by direct use of '+=' sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? || _lt_function_replace_fail=: else # Save a `func_append' function call even when '+=' is not available sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ && mv -f "$cfgfile.tmp" "$cfgfile" \ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") test 0 -eq $? 
|| _lt_function_replace_fail=: fi if test x"$_lt_function_replace_fail" = x":"; then AC_MSG_WARN([Unable to substitute extended shell functions in $ofile]) fi ]) # _LT_PATH_CONVERSION_FUNCTIONS # ----------------------------- # Determine which file name conversion functions should be used by # func_to_host_file (and, implicitly, by func_to_host_path). These are needed # for certain cross-compile configurations and native mingw. m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], [AC_REQUIRE([AC_CANONICAL_HOST])dnl AC_REQUIRE([AC_CANONICAL_BUILD])dnl AC_MSG_CHECKING([how to convert $build file names to $host format]) AC_CACHE_VAL(lt_cv_to_host_file_cmd, [case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ;; esac ;; *-*-cygwin* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_noop ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ;; esac ;; * ) # unhandled hosts (and "normal" native builds) lt_cv_to_host_file_cmd=func_convert_file_noop ;; esac ]) to_host_file_cmd=$lt_cv_to_host_file_cmd AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) _LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], [0], [convert $build file names to $host format])dnl AC_MSG_CHECKING([how to convert $build file names to toolchain format]) AC_CACHE_VAL(lt_cv_to_tool_file_cmd, [#assume ordinary cross tools, or native build. 
lt_cv_to_tool_file_cmd=func_convert_file_noop case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ;; esac ;; esac ]) to_tool_file_cmd=$lt_cv_to_tool_file_cmd AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) _LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], [0], [convert $build files to toolchain format])dnl ])# _LT_PATH_CONVERSION_FUNCTIONS libstoragemgmt-1.2.3/m4/ltversion.m40000644000175000017500000000126212540163517014272 00000000000000# ltversion.m4 -- version numbers -*- Autoconf -*- # # Copyright (C) 2004 Free Software Foundation, Inc. # Written by Scott James Remnant, 2004 # # This file is free software; the Free Software Foundation gives # unlimited permission to copy and/or distribute it, with or without # modifications, as long as this notice is preserved. # @configure_input@ # serial 3337 ltversion.m4 # This file is part of GNU Libtool m4_define([LT_PACKAGE_VERSION], [2.4.2]) m4_define([LT_PACKAGE_REVISION], [1.3337]) AC_DEFUN([LTVERSION_VERSION], [macro_version='2.4.2' macro_revision='1.3337' _LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) _LT_DECL(, macro_revision, 0) ]) libstoragemgmt-1.2.3/NEWS0000664000175000017500000003417512542455432012177 00000000000000News for libStorageMgmt 1.2.3: Jun 24 2015: - Bug fix: * lsmcli bash completion: Fix syntax error. * lsmcli bash completion: Fix volume-delete. * lsmcli bash completion: Add missing completions. 1.2.2: Jun 23 2015: - Bug fixes: * Fix: selinux dac_override * Manpage: Update hpsa and megaraid plugin manpages. * HP Smart Array Plugin: Fix pool querying on P410i. * MegaRAID Plugin: Fix bug when no volume configured. 1.2.1: Jun 17 2015: - Bug fix: * Fix 'make distcheck' error on bash-completion. 1.2.0: Jun 16 2015 - New features: * The lsmd daemon now allows plugin to run as root using configure file. * Targetd plugin got full access group support. 
* The simulator plugin switched from risky pickle to sqlite3 for state saving. * Introduced bash auto completion support for lsmcli command. * Two new plugins for hardware RAID cards: * LSI MegaRAID plugin -- megaraid:// Dell PERC and other OEM rebanded MegaRAID cards are also supported. * HP SmartArray plugin -- hpsa:// - Library adds: * New method to query RAID information of volume: Python: lsm.Client.volume_raid_info(). C: lsm_volume_raid_info(). * New method to query pool membership: Python: lsm.Client.pool_member_info(). C: lsm_pool_member_info(). * New disk status constant to indicate certain disk could be used as pool disk or dedicate spare disk. Python: DISK.STATUS_FREE. C: LSM_DISK_STATUS_FREE. * New method to create RAID volume on hardware RAID cards: Python: lsm.Client.volume_raid_create_cap_get(). lsm.Client.volume_raid_create(). C: lsm_volume_raid_create_cap_get(). lsm_volume_raid_create(). * New C plugin register interface for version 1.2 new methods: lsm_register_plugin_v1_2() - Bug fixes and miscellaneous fixes: * lsmd: Fix a possible infinity loop on plugin search. * Fix memory leak in C unit test. * Library: Fix incorrect Volume.vpd83 definition * SMI-S plugin: Fix SMI-S plugin spare disk support. * SMI-S plugin: Fix target_ports() for HP 3PAR and EMC VMAX. * SMI-S plugin: Fix the incorrect profile_check(). * Fix C library compile warning. * Fix C library potential memory leak in handle_volume_raid_create(). * ONTAP, MegaRAID, SMI-S Plugins: Enforce the definition of Pool.status. * Change license statement by replacing address to URL. * lsmd: add error catch for signal. * lsmcli: fix _get_item error message. * C Library: Fix message loop * C Library: Clean up code for DRY(don't repeat yourself). * SMI-S Plugin: Collect xml during exception. * C Library: Remove ambiguity between wrong type or OOM * C code clean up to use K&R code style. * Add Charles Rose to AUTHORS. 
1.1.1: Dec 4 2014 - Library adds: API Constants for new pool element types and plugin changes to support it * C constants: LSM_POOL_ELEMENT_TYPE_VOLUME_FULL, LSM_POOL_ELEMENT_TYPE_VOLUME_THIN * Py constants: Pool.ELEMENT_TYPE_VOLUME_FULL, Poll.ELEMENT_TYPE_THIN lsmcli: * lt - Alias for 'list --type target_ports' * Removed --init for volume-mask, it was broken for targetd (the only user) and instead of fixing we are going to improve targetd to support access groups in the next release - Numerous code improvements, including a big SMI-S plugin refactor, source code documentation corrections - Bug fix: Use correct default values for anonymous uid/gid in lsmcli - Bug fix: simc simulator not working for allowable NULL parameters for: * fs_child_dependency * fs_child_dependency_rm * fs_snapshot_restore - Bug fix: lsm_restd memory leak corrections - Bug fix: NetApp plugin, correctly set export path when caller specifies default in API - Bug fix: Add file locking to sim plugin to prevent concurrent modification - Bug fix: Consistently report common error conditions for NO_STATE_CHANGE, EXISTS_INITIATOR for all plugins - Bug fix: Number of bugs addressed in SMI-S plugin including: * EMC: Correct error path when replicating a volume with a duplicate volume name * HDS: Correctly create thinly provisioned volume on thinly provisioned pool 1.0.0: Sep 7 2014 - Release version 1 - Numerous constants re-naming & removing - Removed the pool create/delete until things work better, esp. 
WRT SMI-S - Added checks for initiator ID verification - Added checks for vpd 0x83 verification - Simplified error logging (removed domain & level) - Re-named functions for online,offline -> enable,disable - Always use objects instead of object ID in function params - Removed individual files from fs snapshot creation - Add unsupported actions for pools - lsm_capability_set_n uses a -1 to terminate list - Volume status removed, replaced with admin state - Removed ibmiv7k plugin - Explicitly specify python2 - Error path consistency changes (same error for same condition across plug-ins) - Numerous bug fixes 0.1.0: Jul 3 2014 - Release candidate for a 1.0.0 release - Optional data removed - Initiator only functions removed - Pool create from from volumes removed - Code directory structure updated - Target port listing added 0.0.24: Jan 30 2014 - Command line interface (CLI) re-factored and improved to be easier to use and more consistent, man pages have been updated - Command line output now has '-s, --script' for an additional way to output information for consumption in scripts - Command line option '-o' for retrieving optional/extended data for disks & pools - Pool creation/deleting in CLI & python API - Numerous small bug fixes - C API, added ability to list disks, list plugins and retrieve optional data for disks - SSL for SMI-S is more stringent on certificate checking for newer distributions, new URI option "no_ssl_verify=yes" to disable 0.0.23: Nov 27 2013 - Addition of listing disks implemented for SMI-S and Ontap plugins (new, not in C library yet) - Add the ability to list currently installed and usable plug-ins - Verify return types are correct in python client calls - Added the ability to retrieve optional data (new, not in C library yet) - Visibility reductions for python code (somethings were public when should be private - Add calls to create/delete pools (new, not in C library yet) - Add missing initiator type for SAS - Improved vpd83 retrieval for 
SMI-S - Performance improvements for SMI-S plug-in - Numerous small bug fixes - Nstor plugin, additional testing and bug fixes - lsmd, added call to setgroups and enable full relo and PIE (ASLR) for security improvements - simulator state is now versioned - SCSI Unit Attention uevent handling 0.0.22: Aug 12 2013 - Numerous code improvments/fixes - BZ 968384 - BZ 990577 0.0.21: Jul 16 2013 - IEC binary size handling - Functionality improvements for IBM V7K array - Workaround for python bug on F19 - Bugfix (BZ 968384) - Package plug-ins as separately in rpm packages 0.0.20: May 24 2013 - Python library files now in separate rpm - Additional debug for plug-ins when exceptions occur - iSCSI CHAP support modified to handle both inbound and outbound authentication - VOLUME_THIN Added as new capability flag - IBM V7000 storage array support - NFS export support for targetd - EXPORT_CUSTOM_PATH added capability flag 0.0.19: Apr 20 2013 - Improved E-Series array support - Ontap plug-in: improve performance with many Volumes - lsmcli: Number of corrections on handling unit specifiers - lsmcli: Correct stack track when stdout is written to while closed - Fix build to work with automake >= 1.12 0.0.18: Mar 7 2013 - lsmd: Re-written in C - Simplify fs_delete - Corrections for C client against Python plugin - Testing: Run cross language unit test too - Initial FS support for targetd plugin - Fix multi-arch python issues which prevent py and compiled py files from being identical on different arches 0.0.17: Jan 31 2013 - Inconsistency corrections between C and Python API - Source code documentation updates - NexentaStor plug-in has been added 0.0.16: Jan 1 2013 - lsmcli: Add confirmation prompt for data loss operations - lsmcli: Display enumerated values as text - lsmcli: Exit with 7 for --job-status when not complete - Fixed URI example to reference an existing plug-in - lsmcli: Retrieve plug-in desc. 
and version (lsmcli --plugin-info) - simc: Implement CHAP auth function (no-op) - lsmcli: Change check for determining if lsmd is running - Disable mirroring for SMI-S as it needs some re-work 0.0.15: Nov 20 2012 - Pool parameter is optional when replicating a volume - Code improvements(Memory leak fix, lsmcli checks if lsmd is running) - Source code documentation updates - Ability to override simulator data storage location - make check target added to run unit tests 0.0.14: Oct 19 2012 - test/cmdline.py added to automatically test what an array supports - Bug fixes (local plug-in execution, smi-s delete clone, code warnings) - targetd: (uri syntax consistency change, initialization code change) - Pool id added to volume information - lsmcli: Added --replicate-volume-range-block-size to retrieve replicated block size 0.0.13: Sep 28 2012 - targetD Feature adds/fixes for initiators, init_granted_to_volume, volumes_accessible_by_init, initiator_grant, initiator_revoke - SMI-S added compatibility with CIM_StorageConfigurationService - SMI-S bug fixes/changes to support XIV arrays (Basic functionality verified) - SMI-S Proxy layer added to allow different internal implementations of smi-s client - Added missing version information for C plug-in API - lsmcli URI can be stored in file .lsmcli in users home directory 0.0.12: Sep 7 2012 - SMI-S plug-in enhancements (Detach before delete, bug fixes for eSeries) - Added version specifier for non-opaque structs in plug-in callback interface - Documentation updates (doxygen, man pages) - Ontap plug-in: support timeout values - lsmcli, return back async. values other than volumes when using --job-status 0.0.11: Aug 13 2012 - SMI-S fixes and improvements (WaitForCopyState, _get_class_instance) - Methods for arrays that don't support access groups to grant access for luns to initiators etc. 
- ISCSI Chap authentication - System level status field for overall array status - targetd updates for mapping targets to initiators - Simulator updates (python & C) - Removed tog-pegasus dependency (SMI-S is python plug-in) - Removed lsmVolumeStatus as it was implemented and redundant - initscript, check for /var/run and create if missing 0.0.10: July 20 2012 - Simulator plug-in written in C, simc_lsmplugin is available - Numerous updates and re-name for plug-in targetd_lsmplugin - targetd_lsmplugin included in release - Memory leak fixes and improved unit tests - Initial capability query support, implemented for all plug-ins - Flags variable added to API calls, (Warning: C API/ABI breakage, python unaffected) - Bug fixes for NetApp ontap plug-in - SMI-S bug fixes (initiator listing and replication, mode and sync types) - Added ability to specify mirroring async or sync for replication - Added version header file to allow client version header checks 0.0.9: June 12 2012 - Initial checkin of lio plug-in - System filtering via URI (smispy) - Error code mapping (ontap) - Fixed build so same build tarball is used for all binaries 0.0.8: June 4 2012 - Make building of SMI-S CPP plugin optional - Add pkg-config file - SMIS: Fix exception while retrieving Volumes - SMIS: Fix exception while retrieving Volumes - lsm: Add package imports - Make Smis class available in lsm python package - Add option to disable building C unit test - Make simulator classes available in lsm python package - Make ontap class available in lsm python package - Changes to support building on Fedora 17 (v2) - Spec. file updates from feedback from T. 
Callaway (spot) - F17 linker symbol visibility correction - Remove unneeded build dependencies and cleaned up some warnings - C Updates, client C library feature parity with python 0.0.7: May 11 2012 - Bug fix for smi-s constants - Display formatting improvements - Added header option for lsmcli - Improved version handling for builds - Made terminology consistent - Ability to list visibility for access groups and volumes - Simulator plug-in fully supports all block operations - Added support for multiple systems with a single plug-in instance 0.0.6: Apr 20 2012 - Documentation improvements (man & source code) - Support for access groups - Unified spec files Fedora/RHEL - Package version auto generate - Rpm target added to make - Bug fix for missing optional property on volume retrieval (smispy plug-in) 0.0.5: Apr 6 2012 - Spec file clean-up improvements - Async. operation added to lsmcli and ability to check on job status - Sub volume replication support - Ability to check for child dependencies on VOLUMES, FS and files - SMI-S Bug fixes and improvements 0.0.4: Mar 26 2012 - Restore from snapshot - Job identifiers string instead of integer - Updated license address 0.0.3: Mar 19 2012 - Updated to support better packaging - Fixes for NFS export handling - Simulator persistent state 0.0.2: Mar 11 2012 - Native plugin for NetApp 0.0.1alpha: Feb 3 2012 - First release in package form - Basic operation utilizing SMI-S providers. libstoragemgmt-1.2.3/libstoragemgmt.pc0000664000175000017500000000040412542455451015031 00000000000000prefix=/usr/local exec_prefix=${prefix} libdir=${exec_prefix}/lib includedir=${prefix}/include/libstoragemgmt Name: libstoragemgmt Version: 1.2.3 Description: Storage array management library Requires: Libs: -L${libdir} -lstoragemgmt Cflags: -I${includedir}