Repository: cern-eos/eos
Branch: master
Commit: a94b0f29148b
Files: 2385
Total size: 100.5 MB
Directory structure:
gitextract_gne0gw27/
├── .clang-format
├── .clang-tidy
├── .codeclimate.yml
├── .ctest/
│ └── config.cmake
├── .git-blame-ignore-revs
├── .gitignore
├── .gitlab-ci.yml
├── .gitmodules
├── .ignore
├── .mailmap
├── .pre-commit-config.yaml
├── AUDIT.md
├── ApMon/
│ ├── AUTHORS
│ ├── COPYING
│ ├── ChangeLog
│ ├── INSTALL
│ ├── Makefile
│ ├── NEWS
│ ├── README
│ ├── eos-apmon.spec
│ ├── etc/
│ │ └── logrotate.d/
│ │ └── eosapmond
│ ├── jenkins-build.sh
│ ├── maketar.sh
│ ├── opt/
│ │ └── eos/
│ │ └── apmon/
│ │ └── eosapmond
│ ├── perl/
│ │ └── ApMon/
│ │ ├── ApMon/
│ │ │ ├── BgMonitor.pm
│ │ │ ├── Common.pm
│ │ │ ├── ConfigLoader.pm
│ │ │ ├── ProcInfo.pm
│ │ │ └── XDRUtils.pm
│ │ ├── ApMon.pm
│ │ ├── sendToML.sh
│ │ └── servMon.sh
│ ├── run.sh
│ └── usr/
│ └── lib/
│ └── systemd/
│ └── system/
│ └── eosapmond.service
├── CMakeLists.txt
├── CTestConfig.cmake
├── License
├── README.md
├── archive/
│ ├── CMakeLists.txt
│ ├── eosarch/
│ │ ├── __init__.py
│ │ ├── archivefile.py
│ │ ├── asynchandler.py
│ │ ├── configuration.py
│ │ ├── exceptions.py
│ │ ├── processinfo.py
│ │ ├── tests/
│ │ │ ├── __init__.py
│ │ │ ├── env.py
│ │ │ └── test_archivefile.py
│ │ ├── transfer.py
│ │ └── utils.py
│ ├── eosarch_reconstruct.py
│ ├── eosarch_run.py
│ ├── eosarchived.conf
│ ├── eosarchived.py
│ ├── eosarchived.service
│ ├── eosarchived_env.sysconfig
│ └── opt-eos-xrootd.pth
├── auth_plugin/
│ ├── CMakeLists.txt
│ ├── EosAuthOfs.cc
│ ├── EosAuthOfs.hh
│ ├── EosAuthOfsDirectory.cc
│ ├── EosAuthOfsDirectory.hh
│ ├── EosAuthOfsFile.cc
│ ├── EosAuthOfsFile.hh
│ ├── Namespace.hh
│ ├── ProtoUtils.cc
│ ├── ProtoUtils.hh
│ └── proto/
│ ├── Chksum.proto
│ ├── Chmod.proto
│ ├── DirClose.proto
│ ├── DirFname.proto
│ ├── DirOpen.proto
│ ├── DirRead.proto
│ ├── Exists.proto
│ ├── FS_ctl.proto
│ ├── FileClose.proto
│ ├── FileFname.proto
│ ├── FileOpen.proto
│ ├── FileRead.proto
│ ├── FileStat.proto
│ ├── FileWrite.proto
│ ├── Fsctl.proto
│ ├── GetStats.proto
│ ├── Mkdir.proto
│ ├── Prepare.proto
│ ├── Rem.proto
│ ├── Remdir.proto
│ ├── Rename.proto
│ ├── Request.proto
│ ├── Response.proto
│ ├── Stat.proto
│ ├── Truncate.proto
│ ├── XrdOucErrInfo.proto
│ ├── XrdSecEntity.proto
│ ├── XrdSfsFSctl.proto
│ └── XrdSfsPrep.proto
├── client/
│ ├── CMakeLists.txt
│ ├── Namespace.hh
│ └── grpc/
│ ├── Find.cc
│ ├── GrpcClient.cc
│ ├── GrpcClient.hh
│ ├── GrpcClientAuthProcessor.hh
│ ├── Insert.cc
│ ├── Md.cc
│ ├── Ns.cc
│ ├── NsStat.cc
│ └── Ping.cc
├── cmake/
│ ├── CPUArchFlags.cmake
│ ├── DownloadProject.CMakeLists.cmake.in
│ ├── DownloadProject.cmake
│ ├── EosCompileFlags.cmake
│ ├── EosCoverage.cmake
│ ├── EosFindLibs.cmake
│ ├── EosGraphviz.cmake
│ ├── EosOSDefaults.cmake
│ ├── EosSummary.cmake
│ ├── EosTui.cmake
│ ├── EosTuiInstall.cmake.in
│ ├── EosUtils.cmake
│ ├── FindActiveMQCPP.cmake
│ ├── FindAtomic.cmake
│ ├── FindEosGrpcGateway.cmake
│ ├── FindGRPC.cmake
│ ├── FindGlobus.cmake
│ ├── FindLibevent.cmake
│ ├── FindProtobuf3.cmake
│ ├── FindPythonSitePkg.cmake
│ ├── FindRocksDB.cmake
│ ├── FindScitokens.cmake
│ ├── FindSnappy.cmake
│ ├── FindSparseHash.cmake
│ ├── FindSphinx.cmake
│ ├── FindXRootD.cmake
│ ├── FindZMQ.cmake
│ ├── Findabsl.cmake
│ ├── Findbz2.cmake
│ ├── Finddavix.cmake
│ ├── Findeosfolly.cmake
│ ├── Findfuse.cmake
│ ├── Findfuse3.cmake
│ ├── Findglibc.cmake
│ ├── Findhelp2man.cmake
│ ├── Findisal.cmake
│ ├── Findisal_crypto.cmake
│ ├── Findjemalloc.cmake
│ ├── Findjsoncpp.cmake
│ ├── Findkrb5.cmake
│ ├── Findldap.cmake
│ ├── Findlibbfd.cmake
│ ├── Findlibproc2.cmake
│ ├── Findlibunwind.cmake
│ ├── Findlz4.cmake
│ ├── Findncurses.cmake
│ ├── Findnfs.cmake
│ ├── Findprocps.cmake
│ ├── Findreadline.cmake
│ ├── Finduuid.cmake
│ ├── Findxfs.cmake
│ ├── Findxxhash.cmake
│ ├── Findzstd.cmake
│ ├── cmake_uninstall.cmake.in
│ └── config_spec.cmake.in
├── common/
│ ├── Assert.hh
│ ├── AssistedThread.hh
│ ├── Audit.cc
│ ├── Audit.hh
│ ├── BehaviourConfig.cc
│ ├── BehaviourConfig.hh
│ ├── BufferManager.cc
│ ├── BufferManager.hh
│ ├── CLI11.hpp
│ ├── CMakeLists.txt
│ ├── CloExec.hh
│ ├── ClockGetTime.cc
│ ├── ClockGetTime.hh
│ ├── CommentLog.cc
│ ├── CommentLog.hh
│ ├── ConcurrentQueue.hh
│ ├── Config.cc
│ ├── Config.hh
│ ├── Constants.hh
│ ├── CopyProcess.hh
│ ├── Counter.hh
│ ├── CtaCommon.hh
│ ├── DBG.hh
│ ├── Definitions.hh
│ ├── EosLayoutPrint.cc
│ ├── ErrnoToString.cc
│ ├── ErrnoToString.hh
│ ├── ExpiryCache.hh
│ ├── FileId.hh
│ ├── FileMap.hh
│ ├── FileSystem.cc
│ ├── FileSystem.hh
│ ├── Fmd.cc
│ ├── Fmd.hh
│ ├── FutureWrapper.hh
│ ├── Glob.cc
│ ├── Glob.hh
│ ├── IRWMutex.hh
│ ├── InodeTranslator.hh
│ ├── InstanceName.cc
│ ├── InstanceName.hh
│ ├── IntervalStopwatch.cc
│ ├── IntervalStopwatch.hh
│ ├── IoPipe.hh
│ ├── JeMallocHandler.cc
│ ├── JeMallocHandler.hh
│ ├── LOGGING.md
│ ├── LRU.hh
│ ├── LayoutId.hh
│ ├── LinuxFds.hh
│ ├── LinuxMemConsumption.hh
│ ├── LinuxStat.hh
│ ├── LinuxTotalMem.hh
│ ├── Locators.cc
│ ├── Locators.hh
│ ├── Logging.cc
│ ├── Logging.hh
│ ├── Macros.hh
│ ├── Mapping.cc
│ ├── Mapping.hh
│ ├── Murmur3.hh
│ ├── MutexLatencyWatcher.cc
│ ├── MutexLatencyWatcher.hh
│ ├── Namespace.hh
│ ├── OAuth.cc
│ ├── OAuth.hh
│ ├── ObserverMgr.hh
│ ├── Parallel.hh
│ ├── ParseUtils.hh
│ ├── PasswordHandler.hh
│ ├── Path.hh
│ ├── PthreadRWMutex.cc
│ ├── PthreadRWMutex.hh
│ ├── QuarkDBHealthParser.hh
│ ├── RWMutex.cc
│ ├── RWMutex.hh
│ ├── RateLimit.cc
│ ├── RateLimit.hh
│ ├── RegexWrapper.cc
│ ├── RegexWrapper.hh
│ ├── Report.cc
│ ├── Report.hh
│ ├── SecEntity.hh
│ ├── ShardedCache.hh
│ ├── SharedCallbackList.hh
│ ├── SharedMutex.cc
│ ├── SharedMutex.hh
│ ├── ShellCmd.cc
│ ├── ShellCmd.hh
│ ├── ShellExecutor.cc
│ ├── ShellExecutor.hh
│ ├── StackTrace.hh
│ ├── StacktraceHere.cc
│ ├── StacktraceHere.hh
│ ├── Statfs.cc
│ ├── Statfs.hh
│ ├── Statistics.hh
│ ├── Status.hh
│ ├── SteadyClock.hh
│ ├── Strerror_r_wrapper.cc
│ ├── Strerror_r_wrapper.hh
│ ├── StringConversion.cc
│ ├── StringConversion.hh
│ ├── StringSplit.hh
│ ├── StringTokenizer.cc
│ ├── StringTokenizer.hh
│ ├── StringUtils.hh
│ ├── SymKeys.cc
│ ├── SymKeys.hh
│ ├── SyncAll.hh
│ ├── SystemClock.hh
│ ├── ThreadPool.hh
│ ├── Timing.hh
│ ├── UnixGroupsFetcher.cc
│ ├── UnixGroupsFetcher.hh
│ ├── Untraceable.hh
│ ├── UriCapCipher.cc
│ ├── UriCapCipher.hh
│ ├── Utils.cc
│ ├── Utils.hh
│ ├── VirtualIdentity.cc
│ ├── VirtualIdentity.hh
│ ├── WFEClient.hh
│ ├── WaitInterval.hh
│ ├── WebNotify.cc
│ ├── WebNotify.hh
│ ├── XattrCompat.hh
│ ├── XrdConnPool.cc
│ ├── XrdConnPool.hh
│ ├── XrdErrorMap.cc
│ ├── XrdErrorMap.hh
│ ├── async/
│ │ ├── ExecutorMgr.hh
│ │ └── OpaqueFuture.hh
│ ├── blake3/
│ │ ├── README.md
│ │ ├── blake3.c
│ │ ├── blake3.h
│ │ ├── blake3_avx2.c
│ │ ├── blake3_avx2_x86-64_unix.S
│ │ ├── blake3_avx512.c
│ │ ├── blake3_avx512_x86-64_unix.S
│ │ ├── blake3_dispatch.c
│ │ ├── blake3_impl.h
│ │ ├── blake3_neon.c
│ │ ├── blake3_portable.c
│ │ ├── blake3_sse2.c
│ │ ├── blake3_sse2_x86-64_unix.S
│ │ ├── blake3_sse41.c
│ │ ├── blake3_sse41_x86-64_unix.S
│ │ └── main.c
│ ├── concurrency/
│ │ ├── AlignMacros.hh
│ │ ├── AlignedArray.hh
│ │ ├── AtomicUniquePtr.h
│ │ ├── RCULite.hh
│ │ ├── ThreadEpochCounter.cc
│ │ └── ThreadEpochCounter.hh
│ ├── config/
│ │ ├── ConfigParsing.cc
│ │ ├── ConfigParsing.hh
│ │ └── ConfigStore.hh
│ ├── crc32c/
│ │ ├── crc32c.cc
│ │ ├── crc32c.h
│ │ ├── crc32ctables.cc
│ │ └── crc32ctables.h
│ ├── doxygen.hh
│ ├── eos_cta_pb/
│ │ ├── CMakeLists.txt
│ │ └── EosCtaAlertHandler.hh
│ ├── exception/
│ │ ├── Exception.cc
│ │ └── Exception.hh
│ ├── highwayhash/
│ │ ├── arch_specific.h
│ │ ├── c_bindings.h
│ │ ├── compiler_specific.h
│ │ ├── data_parallel.h
│ │ ├── endianess.h
│ │ ├── hh_avx2.h
│ │ ├── hh_buffer.h
│ │ ├── hh_neon.h
│ │ ├── hh_portable.h
│ │ ├── hh_sse41.h
│ │ ├── hh_types.h
│ │ ├── hh_vsx.h
│ │ ├── highwayhash.h
│ │ ├── highwayhash_target.h
│ │ ├── highwayhash_test_target.h
│ │ ├── iaca.h
│ │ ├── instruction_sets.h
│ │ ├── load3.h
│ │ ├── nanobenchmark.h
│ │ ├── os_mac.h
│ │ ├── os_specific.h
│ │ ├── profiler.h
│ │ ├── robust_statistics.h
│ │ ├── scalar.h
│ │ ├── scalar_sip_tree_hash.h
│ │ ├── sip_hash.h
│ │ ├── sip_tree_hash.h
│ │ ├── state_helpers.h
│ │ ├── tsc_timer.h
│ │ ├── vector128.h
│ │ ├── vector256.h
│ │ ├── vector_neon.h
│ │ └── vector_test_target.h
│ ├── hopscotch_hash.hh
│ ├── hopscotch_map.hh
│ ├── http/
│ │ ├── HttpHandler.hh
│ │ ├── HttpRequest.cc
│ │ ├── HttpRequest.hh
│ │ ├── HttpResponse.cc
│ │ ├── HttpResponse.hh
│ │ ├── HttpServer.cc
│ │ ├── HttpServer.hh
│ │ ├── MimeTypes.hh
│ │ ├── OwnCloud.hh
│ │ ├── PlainHttpResponse.hh
│ │ ├── ProtocolHandler.hh
│ │ ├── ProtocolHandlerFactory.hh
│ │ └── s3/
│ │ ├── S3Handler.cc
│ │ ├── S3Handler.hh
│ │ └── S3Response.hh
│ ├── json/
│ │ ├── Json.hh
│ │ ├── JsonCppJsonifier.hh
│ │ ├── Jsonifiable.hh
│ │ └── Jsonifier.hh
│ ├── mq/
│ │ ├── FsChangeListener.cc
│ │ ├── FsChangeListener.hh
│ │ ├── GlobalConfigChangeListener.cc
│ │ ├── GlobalConfigChangeListener.hh
│ │ ├── LocalHash.cc
│ │ ├── LocalHash.hh
│ │ ├── MessagingRealm.cc
│ │ ├── MessagingRealm.hh
│ │ ├── Namespace.hh
│ │ ├── QdbListener.cc
│ │ ├── QdbListener.hh
│ │ ├── SharedDequeProvider.cc
│ │ ├── SharedDequeProvider.hh
│ │ ├── SharedHashProvider.cc
│ │ ├── SharedHashProvider.hh
│ │ ├── SharedHashWrapper.cc
│ │ ├── SharedHashWrapper.hh
│ │ └── XrdMqTiming.hh
│ ├── mutextest/
│ │ └── RWMutexTest.cc
│ ├── plugin_manager/
│ │ ├── DynamicLibrary.cc
│ │ ├── DynamicLibrary.hh
│ │ ├── Plugin.hh
│ │ ├── PluginManager.cc
│ │ └── PluginManager.hh
│ ├── shaping/
│ │ ├── IoStatsKey.hh
│ │ └── SlidingWindowStats.hh
│ ├── shellexectest/
│ │ └── shell_exec_test.cc
│ ├── stringencoders/
│ │ ├── modp_numtoa.c
│ │ └── modp_numtoa.h
│ ├── table_formatter/
│ │ ├── TableCell.cc
│ │ ├── TableCell.hh
│ │ ├── TableFormatterBase.cc
│ │ ├── TableFormatterBase.hh
│ │ └── TableFormatting.hh
│ ├── thread_id.hh
│ ├── token/
│ │ ├── EosTok.cc
│ │ ├── EosTok.hh
│ │ ├── SciToken.cc
│ │ ├── SciToken.hh
│ │ ├── Token.hh
│ │ ├── eosscitokenmodule.c
│ │ ├── example/
│ │ │ └── eossci.py
│ │ ├── scitoken.h
│ │ └── setup.py
│ ├── ulib/
│ │ ├── hash_align.h
│ │ ├── hash_align_prot.h
│ │ ├── ulib.c
│ │ ├── util_algo.h
│ │ └── util_class.h
│ └── utils/
│ ├── BackOffInvoker.hh
│ ├── BindArguments.hh
│ ├── ContainerUtils.hh
│ ├── RandUtils.hh
│ ├── TypeTraits.hh
│ ├── XrdUtils.cc
│ └── XrdUtils.hh
├── console/
│ ├── CMakeLists.txt
│ ├── CommandFramework.cc
│ ├── CommandFramework.hh
│ ├── ConsoleArgParser.cc
│ ├── ConsoleArgParser.hh
│ ├── ConsoleCliCommand.cc
│ ├── ConsoleCliCommand.hh
│ ├── ConsoleCompletion.cc
│ ├── ConsoleCompletion.hh
│ ├── ConsoleMain.cc
│ ├── ConsoleMain.hh
│ ├── ConsoleMainExecutable.cc
│ ├── GlobalOptions.hh
│ ├── ICommand.hh
│ ├── README.md
│ ├── RegexUtil.cc
│ ├── RegexUtil.hh
│ ├── commands/
│ │ ├── HealthCommand.cc
│ │ ├── HealthCommand.hh
│ │ ├── coms/
│ │ │ └── unused/
│ │ │ ├── com_access.cc
│ │ │ ├── com_accounting.cc
│ │ │ ├── com_archive.cc
│ │ │ ├── com_attr.cc
│ │ │ ├── com_backup.cc
│ │ │ ├── com_cd.cc
│ │ │ ├── com_chmod.cc
│ │ │ ├── com_chown.cc
│ │ │ ├── com_clear.cc
│ │ │ ├── com_cp.cc
│ │ │ ├── com_daemon.cc
│ │ │ ├── com_debug.cc
│ │ │ ├── com_du.cc
│ │ │ ├── com_evict.cc
│ │ │ ├── com_file.cc
│ │ │ ├── com_fuse.cc
│ │ │ ├── com_fusex.cc
│ │ │ ├── com_geosched.cc
│ │ │ ├── com_health.cc
│ │ │ ├── com_info.cc
│ │ │ ├── com_inspector.cc
│ │ │ ├── com_json.cc
│ │ │ ├── com_license.cc
│ │ │ ├── com_ln.cc
│ │ │ ├── com_map.cc
│ │ │ ├── com_member.cc
│ │ │ ├── com_mkdir.cc
│ │ │ ├── com_motd.cc
│ │ │ ├── com_mv.cc
│ │ │ ├── com_old_find.cc
│ │ │ ├── com_print.cc
│ │ │ ├── com_proto_access.cc
│ │ │ ├── com_proto_acl.cc
│ │ │ ├── com_proto_config.cc
│ │ │ ├── com_proto_convert.cc
│ │ │ ├── com_proto_debug.cc
│ │ │ ├── com_proto_devices.cc
│ │ │ ├── com_proto_df.cc
│ │ │ ├── com_proto_find.cc
│ │ │ ├── com_proto_fs.cc
│ │ │ ├── com_proto_fsck.cc
│ │ │ ├── com_proto_group.cc
│ │ │ ├── com_proto_io.cc
│ │ │ ├── com_proto_node.cc
│ │ │ ├── com_proto_ns.cc
│ │ │ ├── com_proto_quota.cc
│ │ │ ├── com_proto_recycle.cc
│ │ │ ├── com_proto_register.cc
│ │ │ ├── com_proto_rm.cc
│ │ │ ├── com_proto_route.cc
│ │ │ ├── com_proto_sched.cc
│ │ │ ├── com_proto_space.cc
│ │ │ ├── com_proto_token.cc
│ │ │ ├── com_pwd.cc
│ │ │ ├── com_quit.cc
│ │ │ ├── com_quota.cc
│ │ │ ├── com_rclone.cc
│ │ │ ├── com_reconnect.cc
│ │ │ ├── com_report.cc
│ │ │ ├── com_rm.cc
│ │ │ ├── com_rmdir.cc
│ │ │ ├── com_role.cc
│ │ │ ├── com_rtlog.cc
│ │ │ ├── com_scitoken.cc
│ │ │ ├── com_silent.cc
│ │ │ ├── com_squash.cc
│ │ │ ├── com_stat.cc
│ │ │ ├── com_status.cc
│ │ │ ├── com_test.cc
│ │ │ ├── com_timing.cc
│ │ │ ├── com_touch.cc
│ │ │ ├── com_tracker.cc
│ │ │ ├── com_version.cc
│ │ │ ├── com_vid.cc
│ │ │ ├── com_who.cc
│ │ │ └── com_whoami.cc
│ │ ├── helpers/
│ │ │ ├── AclHelper.cc
│ │ │ ├── AclHelper.hh
│ │ │ ├── FsHelper.cc
│ │ │ ├── FsHelper.hh
│ │ │ ├── FsckHelper.cc
│ │ │ ├── FsckHelper.hh
│ │ │ ├── ICmdHelper.cc
│ │ │ ├── ICmdHelper.hh
│ │ │ ├── NewfindHelper.cc
│ │ │ ├── NewfindHelper.hh
│ │ │ ├── NodeHelper.cc
│ │ │ ├── NodeHelper.hh
│ │ │ ├── RecycleHelper.cc
│ │ │ ├── RecycleHelper.hh
│ │ │ ├── TokenHelper.cc
│ │ │ ├── TokenHelper.hh
│ │ │ └── jwk_generator/
│ │ │ ├── c_resource.hpp
│ │ │ ├── errors.hpp
│ │ │ ├── jwk_generator.hpp
│ │ │ ├── keyspecs/
│ │ │ │ ├── ec_key.hpp
│ │ │ │ └── rsa_key.hpp
│ │ │ ├── libs/
│ │ │ │ ├── base64_url.hpp
│ │ │ │ ├── json.hpp
│ │ │ │ └── uuid.hpp
│ │ │ └── openssl_wrapper.hpp
│ │ └── native/
│ │ ├── CoreNativeCommands.cc
│ │ ├── LegacySymbols.cc
│ │ ├── access-proto-native.cc
│ │ ├── accounting-cmd-native.cc
│ │ ├── acl-proto-native.cc
│ │ ├── archive-cmd-native.cc
│ │ ├── attr-cmd-native.cc
│ │ ├── backup-cmd-native.cc
│ │ ├── cat-com-native.cc
│ │ ├── cd-cmd-native.cc
│ │ ├── chmod-cmd-native.cc
│ │ ├── chown-cmd-native.cc
│ │ ├── clear-cmd-native.cc
│ │ ├── config-proto-native.cc
│ │ ├── convert-proto-native.cc
│ │ ├── cp-cmd-native.cc
│ │ ├── daemon-native.cc
│ │ ├── debug-cmd-native.cc
│ │ ├── devices-proto-native.cc
│ │ ├── df-proto-native.cc
│ │ ├── du-native.cc
│ │ ├── du-proto-native.cc
│ │ ├── evict-cmd-native.cc
│ │ ├── file-cmd-native.cc
│ │ ├── fileinfo-alias.cc
│ │ ├── find-proto-native.cc
│ │ ├── fs-proto-native.cc
│ │ ├── fsck-proto-native.cc
│ │ ├── fuse-native.cc
│ │ ├── fusex-cmd-native.cc
│ │ ├── geosched-cmd-native.cc
│ │ ├── group-proto-native.cc
│ │ ├── health-native.cc
│ │ ├── info-alias.cc
│ │ ├── info-native.cc
│ │ ├── inspector-proto-native.cc
│ │ ├── io-proto-native.cc
│ │ ├── license-native.cc
│ │ ├── ln-cmd-native.cc
│ │ ├── ls-cmd-native.cc
│ │ ├── ls-compat.cc
│ │ ├── map-cmd-native.cc
│ │ ├── member-cmd-native.cc
│ │ ├── mkdir-cmd-native.cc
│ │ ├── motd-cmd-native.cc
│ │ ├── mv-alias.cc
│ │ ├── node-proto-native.cc
│ │ ├── ns-proto-native.cc
│ │ ├── oldfind-cmd-native.cc
│ │ ├── pwd-native.cc
│ │ ├── quota-proto-native.cc
│ │ ├── rclone-cmd-native.cc
│ │ ├── reconnect-native.cc
│ │ ├── recycle-proto-native.cc
│ │ ├── register-proto-native.cc
│ │ ├── report-native.cc
│ │ ├── rm-proto-native.cc
│ │ ├── rmdir-cmd-native.cc
│ │ ├── role-native.cc
│ │ ├── route-proto-native.cc
│ │ ├── rtlog-cmd-native.cc
│ │ ├── sched-proto-native.cc
│ │ ├── scitoken-native.cc
│ │ ├── space-proto-native.cc
│ │ ├── squash-cmd-native.cc
│ │ ├── stat-native.cc
│ │ ├── status-native.cc
│ │ ├── test-native.cc
│ │ ├── token-proto-native.cc
│ │ ├── touch-cmd-native.cc
│ │ ├── tracker-proto-native.cc
│ │ ├── tui-native.cc
│ │ ├── version-cmd-native.cc
│ │ ├── vid-cmd-native.cc
│ │ ├── who-cmd-native.cc
│ │ └── whoami-cmd-native.cc
│ ├── eos-iam-mapfile
│ ├── eosadmin
│ └── eosreport
├── coverage/
│ └── eoslcov.rc
├── debian/
│ ├── compat
│ ├── control.template
│ ├── copyright
│ ├── eos-client.install
│ ├── eos-fusex.install
│ ├── eos-fusex.postinst
│ ├── eos-test.install
│ ├── eos-testkeytab.install
│ ├── eos-testkeytab.postinst
│ ├── rules
│ └── source/
│ └── format
├── doc/
│ ├── _themes/
│ │ ├── solar_theme/
│ │ │ ├── __init__.py
│ │ │ ├── layout.html
│ │ │ ├── static/
│ │ │ │ ├── solar.css
│ │ │ │ └── solarized-dark.css
│ │ │ └── theme.conf
│ │ └── sphinx13/
│ │ ├── layout.html
│ │ └── theme.conf
│ ├── citrine/
│ │ ├── Doxyfile
│ │ ├── backup_clone.rst
│ │ ├── backup_clone.txt
│ │ ├── clicommands/
│ │ │ ├── accounting.rst
│ │ │ ├── acl.rst
│ │ │ ├── archive.rst
│ │ │ ├── attr.rst
│ │ │ ├── backup.rst
│ │ │ ├── cd.rst
│ │ │ ├── chmod.rst
│ │ │ ├── chown.rst
│ │ │ ├── clear.rst
│ │ │ ├── config.rst
│ │ │ ├── console.rst
│ │ │ ├── cp.rst
│ │ │ ├── debug.rst
│ │ │ ├── evict.rst
│ │ │ ├── exit.rst
│ │ │ ├── file.rst
│ │ │ ├── fileinfo.rst
│ │ │ ├── find.rst
│ │ │ ├── fs.rst
│ │ │ ├── fsck.rst
│ │ │ ├── fuse.rst
│ │ │ ├── fusex.rst
│ │ │ ├── geosched.rst
│ │ │ ├── group.rst
│ │ │ ├── health.rst
│ │ │ ├── help.rst
│ │ │ ├── info.rst
│ │ │ ├── inspector.rst
│ │ │ ├── io.rst
│ │ │ ├── json.rst
│ │ │ ├── license.rst
│ │ │ ├── ln.rst
│ │ │ ├── ls.rst
│ │ │ ├── map.rst
│ │ │ ├── member.rst
│ │ │ ├── mkdir.rst
│ │ │ ├── motd.rst
│ │ │ ├── mv.rst
│ │ │ ├── newfind.rst
│ │ │ ├── node.rst
│ │ │ ├── ns.rst
│ │ │ ├── pointq.rst
│ │ │ ├── pwd.rst
│ │ │ ├── question.rst
│ │ │ ├── quit.rst
│ │ │ ├── quota.rst
│ │ │ ├── reconnect.rst
│ │ │ ├── recycle.rst
│ │ │ ├── rm.rst
│ │ │ ├── rmdir.rst
│ │ │ ├── role.rst
│ │ │ ├── route.rst
│ │ │ ├── rtlog.rst
│ │ │ ├── silent.rst
│ │ │ ├── space.rst
│ │ │ ├── squash.rst
│ │ │ ├── stat.rst
│ │ │ ├── test.rst
│ │ │ ├── timing.rst
│ │ │ ├── token.rst
│ │ │ ├── touch.rst
│ │ │ ├── tracker.rst
│ │ │ ├── transfer.rst
│ │ │ ├── version.rst
│ │ │ ├── vid.rst
│ │ │ ├── who.rst
│ │ │ └── whoami.rst
│ │ ├── clicommands.rst
│ │ ├── conf.py
│ │ ├── configuration/
│ │ │ ├── archive.rst
│ │ │ ├── balancing.rst
│ │ │ ├── converter.rst
│ │ │ ├── converter_engine.rst
│ │ │ ├── draining.rst
│ │ │ ├── egi.rst
│ │ │ ├── fsck.rst
│ │ │ ├── fuse.rst
│ │ │ ├── fusex.rst
│ │ │ ├── geobalancer.rst
│ │ │ ├── geoscheduling.rst
│ │ │ ├── geotags.rst
│ │ │ ├── groupbalancer.rst
│ │ │ ├── groupdrainer.rst
│ │ │ ├── http.rst
│ │ │ ├── http_tpc.rst
│ │ │ ├── import.rst
│ │ │ ├── inspector.rst
│ │ │ ├── kinetic.rst
│ │ │ ├── logicalpath.rst
│ │ │ ├── lru.rst
│ │ │ ├── master.rst
│ │ │ ├── master_quarkdb.rst
│ │ │ ├── namespace.rst
│ │ │ ├── permission.rst
│ │ │ ├── proxys.rst
│ │ │ ├── qos.rst
│ │ │ ├── quarkdb.rst
│ │ │ ├── quota.rst
│ │ │ ├── recyclebin.rst
│ │ │ ├── route.rst
│ │ │ ├── s3.rst
│ │ │ ├── scheduler.rst
│ │ │ ├── tracker.rst
│ │ │ ├── transfer.rst
│ │ │ ├── tty.rst
│ │ │ └── wfe.rst
│ │ ├── configuration.rst
│ │ ├── contents.rst
│ │ ├── develop.rst
│ │ ├── generate_docs.py
│ │ ├── index.rst
│ │ ├── install.rst
│ │ ├── intro.rst
│ │ ├── quickstart/
│ │ │ ├── admin/
│ │ │ │ ├── configure.rst
│ │ │ │ └── krb5.rst
│ │ │ ├── boxed.rst
│ │ │ ├── client/
│ │ │ │ └── configure.rst
│ │ │ ├── docker_image.rst
│ │ │ ├── install.rst
│ │ │ ├── kubernetes.rst
│ │ │ ├── ns_quarkdb.rst
│ │ │ ├── setup_repo.rst
│ │ │ ├── uboxed.rst
│ │ │ ├── ubuntu.rst
│ │ │ └── update_eos4to5.rst
│ │ ├── quickstart.rst
│ │ ├── releases/
│ │ │ ├── amber.rst
│ │ │ ├── beryl-release.rst
│ │ │ ├── beryl.rst
│ │ │ ├── citrine-release.rst
│ │ │ ├── citrine.rst
│ │ │ ├── diopside-release.rst
│ │ │ └── diopside.rst
│ │ ├── releases.rst
│ │ ├── restapi/
│ │ │ ├── fileinfo.rst
│ │ │ ├── format.rst
│ │ │ ├── fs.rst
│ │ │ ├── group.rst
│ │ │ ├── grpc.rst
│ │ │ ├── node.rst
│ │ │ ├── ns.rst
│ │ │ ├── putrange.rst
│ │ │ ├── space.rst
│ │ │ ├── version.rst
│ │ │ └── who.rst
│ │ ├── restapi.rst
│ │ ├── taperestapi/
│ │ │ └── configuration.rst
│ │ ├── taperestapi.rst
│ │ ├── using/
│ │ │ ├── archive.rst
│ │ │ ├── attributelocks.rst
│ │ │ ├── eos_services.rst
│ │ │ ├── fusex.rst
│ │ │ ├── oauth2.rst
│ │ │ ├── policies.rst
│ │ │ ├── priorities.rst
│ │ │ ├── rain.rst
│ │ │ ├── reports.rst
│ │ │ ├── sharedfs.rst
│ │ │ ├── squashfs.rst
│ │ │ ├── systemd.rst
│ │ │ ├── tokens.rst
│ │ │ └── versions.rst
│ │ └── using.rst
│ └── diopside/
│ ├── architecture/
│ │ └── index.rst
│ ├── blog/
│ │ └── features.rst
│ ├── conf.py
│ ├── configuration.rst
│ ├── faq/
│ │ ├── exotic.rst
│ │ └── index.rst
│ ├── index.rst
│ ├── introduction/
│ │ └── index.rst
│ ├── manual/
│ │ ├── configuration.rst
│ │ ├── develop.rst
│ │ ├── egi.rst
│ │ ├── formats.rst
│ │ ├── getting-started.rst
│ │ ├── hardware-installation.rst
│ │ ├── index.rst
│ │ ├── interfaces.rst
│ │ ├── microservices.rst
│ │ ├── protocols.rst
│ │ └── using.rst
│ ├── my-changes.patch
│ └── releases/
│ ├── #diopside-release.rst#
│ ├── 5.4.0/
│ │ └── recycle_bin_config.rst
│ ├── amber.rst
│ ├── beryl-release.rst
│ ├── beryl.rst
│ ├── citrine-release.rst
│ ├── citrine.rst
│ ├── diopside-release.rst
│ ├── diopside.rst
│ └── index.rst
├── elrepopackage.spec
├── eos.spec.in
├── fst/
│ ├── CMakeLists.txt
│ ├── Config.cc
│ ├── Config.hh
│ ├── Deletion.hh
│ ├── Health.cc
│ ├── Health.hh
│ ├── Load.cc
│ ├── Load.hh
│ ├── Namespace.hh
│ ├── ScanDir.cc
│ ├── ScanDir.hh
│ ├── Verify.hh
│ ├── XrdFstOfs.cc
│ ├── XrdFstOfs.hh
│ ├── XrdFstOfsFile.cc
│ ├── XrdFstOfsFile.hh
│ ├── XrdFstOss.cc
│ ├── XrdFstOss.hh
│ ├── XrdFstOssFile.cc
│ ├── XrdFstOssFile.hh
│ ├── checksum/
│ │ ├── Adler.cc
│ │ ├── Adler.hh
│ │ ├── BLAKE3.hh
│ │ ├── CRC32.hh
│ │ ├── CRC32C.hh
│ │ ├── CRC64.hh
│ │ ├── CheckSum.cc
│ │ ├── CheckSum.hh
│ │ ├── ChecksumGroup.hh
│ │ ├── ChecksumPlugins.hh
│ │ ├── HWH64.hh
│ │ ├── MD5.hh
│ │ ├── SHA1.hh
│ │ ├── SHA256.hh
│ │ ├── XXHASH64.hh
│ │ └── cycletimer.h
│ ├── eoscp.cc
│ ├── filemd/
│ │ ├── FmdAttr.cc
│ │ ├── FmdAttr.hh
│ │ ├── FmdHandler.cc
│ │ ├── FmdHandler.hh
│ │ ├── FmdMgm.cc
│ │ └── FmdMgm.hh
│ ├── http/
│ │ ├── HttpHandler.cc
│ │ ├── HttpHandler.hh
│ │ ├── HttpHandlerFstFileCache.cc
│ │ ├── HttpHandlerFstFileCache.hh
│ │ ├── HttpServer.cc
│ │ ├── HttpServer.hh
│ │ ├── ProtocolHandlerFactory.hh
│ │ ├── s3/
│ │ │ ├── S3Handler.cc
│ │ │ └── S3Handler.hh
│ │ └── xrdhttp/
│ │ ├── EosFstHttpHandler.cc
│ │ ├── EosFstHttpHandler.hh
│ │ └── README.md
│ ├── io/
│ │ ├── AsyncMetaHandler.cc
│ │ ├── AsyncMetaHandler.hh
│ │ ├── ChunkHandler.cc
│ │ ├── ChunkHandler.hh
│ │ ├── FileIo.cc
│ │ ├── FileIo.hh
│ │ ├── FileIoPlugin-Server.cc
│ │ ├── FileIoPlugin.cc
│ │ ├── FileIoPlugin.hh
│ │ ├── FileIoPluginCommon.hh
│ │ ├── SimpleHandler.cc
│ │ ├── SimpleHandler.hh
│ │ ├── VectChunkHandler.cc
│ │ ├── VectChunkHandler.hh
│ │ ├── davix/
│ │ │ ├── DavixIo.cc
│ │ │ └── DavixIo.hh
│ │ ├── local/
│ │ │ ├── FsIo.cc
│ │ │ ├── FsIo.hh
│ │ │ ├── LocalIo.cc
│ │ │ └── LocalIo.hh
│ │ ├── nfs/
│ │ │ ├── NfsIo.cc
│ │ │ └── NfsIo.hh
│ │ └── xrd/
│ │ ├── ResponseCollector.cc
│ │ ├── ResponseCollector.hh
│ │ ├── XrdIo.cc
│ │ └── XrdIo.hh
│ ├── layout/
│ │ ├── HeaderCRC.cc
│ │ ├── HeaderCRC.hh
│ │ ├── Layout.cc
│ │ ├── Layout.hh
│ │ ├── LayoutPlugin.cc
│ │ ├── LayoutPlugin.hh
│ │ ├── PlainLayout.cc
│ │ ├── PlainLayout.hh
│ │ ├── RaidDpLayout.cc
│ │ ├── RaidDpLayout.hh
│ │ ├── RainBlock.cc
│ │ ├── RainBlock.hh
│ │ ├── RainGroup.cc
│ │ ├── RainGroup.hh
│ │ ├── RainMetaLayout.cc
│ │ ├── RainMetaLayout.hh
│ │ ├── ReedSLayout.cc
│ │ ├── ReedSLayout.hh
│ │ ├── ReplicaParLayout.cc
│ │ ├── ReplicaParLayout.hh
│ │ ├── gf-complete/
│ │ │ ├── .gitignore
│ │ │ ├── AUTHORS
│ │ │ ├── COPYING
│ │ │ ├── ChangeLog
│ │ │ ├── License.txt
│ │ │ ├── Makefile.am
│ │ │ ├── NEWS
│ │ │ ├── README
│ │ │ ├── README.txt
│ │ │ ├── autogen.sh
│ │ │ ├── configure.ac
│ │ │ ├── examples/
│ │ │ │ ├── Makefile.am
│ │ │ │ ├── gf_example_1.c
│ │ │ │ ├── gf_example_2.c
│ │ │ │ ├── gf_example_3.c
│ │ │ │ ├── gf_example_4.c
│ │ │ │ ├── gf_example_5.c
│ │ │ │ ├── gf_example_6.c
│ │ │ │ └── gf_example_7.c
│ │ │ ├── include/
│ │ │ │ ├── gf_complete.h
│ │ │ │ ├── gf_general.h
│ │ │ │ ├── gf_int.h
│ │ │ │ ├── gf_method.h
│ │ │ │ ├── gf_rand.h
│ │ │ │ ├── gf_w16.h
│ │ │ │ ├── gf_w32.h
│ │ │ │ ├── gf_w4.h
│ │ │ │ ├── gf_w64.h
│ │ │ │ └── gf_w8.h
│ │ │ ├── m4/
│ │ │ │ ├── ax_check_compile_flag.m4
│ │ │ │ ├── ax_ext.m4
│ │ │ │ ├── ax_gcc_x86_avx_xgetbv.m4
│ │ │ │ ├── ax_gcc_x86_cpuid.m4
│ │ │ │ ├── ltoptions.m4
│ │ │ │ ├── ltsugar.m4
│ │ │ │ └── lt~obsolete.m4
│ │ │ ├── manual/
│ │ │ │ ├── gf-complete.html
│ │ │ │ └── style.css
│ │ │ ├── src/
│ │ │ │ ├── Makefile.am
│ │ │ │ ├── gf.c
│ │ │ │ ├── gf_general.c
│ │ │ │ ├── gf_method.c
│ │ │ │ ├── gf_rand.c
│ │ │ │ ├── gf_w128.c
│ │ │ │ ├── gf_w16.c
│ │ │ │ ├── gf_w32.c
│ │ │ │ ├── gf_w4.c
│ │ │ │ ├── gf_w64.c
│ │ │ │ ├── gf_w8.c
│ │ │ │ ├── gf_wgen.c
│ │ │ │ └── neon/
│ │ │ │ ├── gf_w16_neon.c
│ │ │ │ ├── gf_w32_neon.c
│ │ │ │ ├── gf_w4_neon.c
│ │ │ │ ├── gf_w64_neon.c
│ │ │ │ └── gf_w8_neon.c
│ │ │ ├── test/
│ │ │ │ ├── Makefile.am
│ │ │ │ └── gf_unit.c
│ │ │ └── tools/
│ │ │ ├── Makefile.am
│ │ │ ├── gf_add.c
│ │ │ ├── gf_div.c
│ │ │ ├── gf_inline_time.c
│ │ │ ├── gf_methods.c
│ │ │ ├── gf_mult.c
│ │ │ ├── gf_poly.c
│ │ │ ├── gf_time.c
│ │ │ └── time_tool.sh
│ │ └── jerasure/
│ │ ├── .gitattributes
│ │ ├── .gitignore
│ │ ├── AUTHORS
│ │ ├── COPYING
│ │ ├── ChangeLog
│ │ ├── Examples/
│ │ │ ├── .gitignore
│ │ │ ├── Makefile.am
│ │ │ ├── cauchy_01.c
│ │ │ ├── cauchy_02.c
│ │ │ ├── cauchy_03.c
│ │ │ ├── cauchy_04.c
│ │ │ ├── decoder.c
│ │ │ ├── encode_decode.sh
│ │ │ ├── encoder.c
│ │ │ ├── jerasure_01.c
│ │ │ ├── jerasure_02.c
│ │ │ ├── jerasure_03.c
│ │ │ ├── jerasure_04.c
│ │ │ ├── jerasure_05.c
│ │ │ ├── jerasure_06.c
│ │ │ ├── jerasure_07.c
│ │ │ ├── jerasure_08.c
│ │ │ ├── liberation_01.c
│ │ │ ├── reed_sol_01.c
│ │ │ ├── reed_sol_02.c
│ │ │ ├── reed_sol_03.c
│ │ │ ├── reed_sol_04.c
│ │ │ ├── reed_sol_test_gf.c
│ │ │ ├── reed_sol_time_gf.c
│ │ │ ├── test_all_gfs.sh
│ │ │ ├── test_galois.c
│ │ │ └── time_all_gfs_argv_init.sh
│ │ ├── License.txt
│ │ ├── Makefile.am
│ │ ├── NEWS
│ │ ├── PERF.txt
│ │ ├── README
│ │ ├── configure.ac
│ │ ├── include/
│ │ │ ├── cauchy.h
│ │ │ ├── galois.h
│ │ │ ├── jerasure.h
│ │ │ ├── liberation.h
│ │ │ ├── reed_sol.h
│ │ │ └── timing.h
│ │ ├── m4/
│ │ │ ├── ax_check_compile_flag.m4
│ │ │ ├── ax_ext.m4
│ │ │ ├── ax_gcc_x86_avx_xgetbv.m4
│ │ │ ├── ax_gcc_x86_cpuid.m4
│ │ │ └── ax_require_defined.m4
│ │ └── src/
│ │ ├── Makefile.am
│ │ ├── cauchy.c
│ │ ├── cauchy_best_r6.c
│ │ ├── galois.c
│ │ ├── jerasure.c
│ │ ├── liberation.c
│ │ ├── reed_sol.c
│ │ └── timing.c
│ ├── storage/
│ │ ├── Communicator.cc
│ │ ├── ErrorReport.cc
│ │ ├── FileSystem.cc
│ │ ├── FileSystem.hh
│ │ ├── MgmSyncer.cc
│ │ ├── MonitorVarPartition.hh
│ │ ├── Publish.cc
│ │ ├── Remover.cc
│ │ ├── Report.cc
│ │ ├── Scrub.cc
│ │ ├── Storage.cc
│ │ ├── Storage.hh
│ │ ├── Supervisor.cc
│ │ ├── TrafficShaping.cc
│ │ ├── TrafficShaping.hh
│ │ └── Verify.cc
│ ├── tools/
│ │ ├── Adler32.cc
│ │ ├── CheckBlockXS.cc
│ │ ├── CheckSum.cc
│ │ ├── ComputeBlockXS.cc
│ │ ├── ConvertFileMD.cc
│ │ ├── IoPing.c
│ │ ├── RainCheck.cc
│ │ ├── RainHdrDump.cc
│ │ ├── RecoverRaidDP.cc
│ │ ├── ScanXS.cc
│ │ ├── eosfstinfo
│ │ └── eosfstregister
│ ├── utils/
│ │ ├── CheckFileReadWithPattern.cc
│ │ ├── CreateFileWithPattern.cc
│ │ ├── DiskMeasurements.cc
│ │ ├── DiskMeasurements.hh
│ │ ├── DiskMeasurementsMain.cc
│ │ ├── FSPathHandler.cc
│ │ ├── FSPathHandler.hh
│ │ ├── FTSWalkTree.hh
│ │ ├── IoPriority.cc
│ │ ├── IoPriority.hh
│ │ ├── OpenFileTracker.cc
│ │ ├── OpenFileTracker.hh
│ │ ├── ScanRate.cc
│ │ ├── ScanRate.hh
│ │ ├── StdFSWalkTree.hh
│ │ ├── TpcInfo.hh
│ │ ├── TransformAttr.hh
│ │ ├── XrdOfsPathHandler.cc
│ │ └── XrdOfsPathHandler.hh
│ └── xrdcl_plugins/
│ ├── CMakeLists.txt
│ ├── RainFile.cc
│ ├── RainFile.hh
│ ├── RainPlugin.cc
│ └── RainPlugin.hh
├── fusex/
│ ├── CMakeLists.txt
│ ├── README.md
│ ├── auth/
│ │ ├── AuthenticationGroup.cc
│ │ ├── AuthenticationGroup.hh
│ │ ├── BoundIdentityProvider.cc
│ │ ├── BoundIdentityProvider.hh
│ │ ├── CMakeLists.txt
│ │ ├── CredentialCache.hh
│ │ ├── CredentialFinder.cc
│ │ ├── CredentialFinder.hh
│ │ ├── CredentialValidator.cc
│ │ ├── CredentialValidator.hh
│ │ ├── DirectoryIterator.cc
│ │ ├── DirectoryIterator.hh
│ │ ├── EnvironmentReader.cc
│ │ ├── EnvironmentReader.hh
│ │ ├── FileDescriptor.hh
│ │ ├── JailIdentifier.cc
│ │ ├── JailIdentifier.hh
│ │ ├── Logbook.cc
│ │ ├── Logbook.hh
│ │ ├── LoginIdentifier.cc
│ │ ├── LoginIdentifier.hh
│ │ ├── ProcessCache.cc
│ │ ├── ProcessCache.hh
│ │ ├── ProcessInfo.cc
│ │ ├── ProcessInfo.hh
│ │ ├── README.md
│ │ ├── RmInfo.cc
│ │ ├── RmInfo.hh
│ │ ├── ScopedEUidSetter.hh
│ │ ├── ScopedFsUidSetter.hh
│ │ ├── SecurityChecker.cc
│ │ ├── SecurityChecker.hh
│ │ ├── UnixAuthenticator.cc
│ │ ├── UnixAuthenticator.hh
│ │ ├── UserCredentialFactory.cc
│ │ ├── UserCredentialFactory.hh
│ │ ├── UserCredentials.hh
│ │ ├── Utils.cc
│ │ ├── Utils.hh
│ │ ├── UuidStore.cc
│ │ └── UuidStore.hh
│ ├── backend/
│ │ ├── backend.cc
│ │ └── backend.hh
│ ├── benchmark/
│ │ ├── CMakeLists.txt
│ │ ├── eos-fusex-certify
│ │ └── fusex-benchmark.cc
│ ├── cap/
│ │ ├── cap.cc
│ │ └── cap.hh
│ ├── data/
│ │ ├── bufferll.hh
│ │ ├── cache.cc
│ │ ├── cache.hh
│ │ ├── cacheconfig.hh
│ │ ├── cachehandler.hh
│ │ ├── cachelock.hh
│ │ ├── cachesyncer.cc
│ │ ├── cachesyncer.hh
│ │ ├── data.cc
│ │ ├── data.hh
│ │ ├── dircleaner.cc
│ │ ├── dircleaner.hh
│ │ ├── diskcache.cc
│ │ ├── diskcache.hh
│ │ ├── interval_tree.hh
│ │ ├── io.hh
│ │ ├── journalcache.cc
│ │ ├── journalcache.hh
│ │ ├── memorycache.cc
│ │ ├── memorycache.hh
│ │ ├── rbtree.hh
│ │ ├── xrdclproxy.cc
│ │ └── xrdclproxy.hh
│ ├── eoscfsd/
│ │ ├── README.md
│ │ ├── cfs.sh
│ │ ├── cfskey.hh
│ │ ├── cfslogin.cc
│ │ ├── cfslogin.hh
│ │ ├── cfsmapping.hh
│ │ ├── cfsquota.hh
│ │ ├── cfsrecycle.cc
│ │ ├── cfsrecycle.hh
│ │ ├── cfsutil.hh
│ │ ├── cfsvattr.hh
│ │ ├── eoscfsd.cc
│ │ ├── eoscfsd.hh
│ │ ├── keychange.hh
│ │ ├── obfuscate.hh
│ │ └── overlay.hh
│ ├── eosfusebind
│ ├── eosxd/
│ │ ├── eosfuse.cc
│ │ ├── eosfuse.hh
│ │ ├── llfusexx.hh
│ │ └── main.cc
│ ├── fuse.conf.example
│ ├── fuse.example.stats.json
│ ├── fusex.proto
│ ├── kv/
│ │ ├── NoKV.cc
│ │ ├── NoKV.hh
│ │ ├── RocksKV.cc
│ │ ├── RocksKV.hh
│ │ └── kv.hh
│ ├── md/
│ │ ├── kernelcache.hh
│ │ ├── md.cc
│ │ └── md.hh
│ ├── misc/
│ │ ├── ConcurrentMount.cc
│ │ ├── ConcurrentMount.hh
│ │ ├── FuseException.hh
│ │ ├── FuseId.hh
│ │ ├── MacOSXHelper.hh
│ │ ├── RunningPidScanner.cc
│ │ ├── RunningPidScanner.hh
│ │ ├── SyncQueue.hh
│ │ ├── ThreadPool.hh
│ │ ├── Track.hh
│ │ ├── filename.hh
│ │ ├── fusexrdlogin.cc
│ │ ├── fusexrdlogin.hh
│ │ ├── longstring.cc
│ │ ├── longstring.hh
│ │ ├── richacl.hh
│ │ └── stringTS.hh
│ ├── stat/
│ │ ├── Stat.cc
│ │ └── Stat.hh
│ ├── submount/
│ │ ├── SubMount.cc
│ │ └── SubMount.hh
│ ├── tests/
│ │ ├── CMakeLists.txt
│ │ ├── auth/
│ │ │ ├── credential-finder.cc
│ │ │ ├── environment-reader.cc
│ │ │ ├── logbook.cc
│ │ │ ├── login-identifier.cc
│ │ │ ├── process-cache.cc
│ │ │ ├── process-info.cc
│ │ │ ├── rm-info.cc
│ │ │ ├── security-checker.cc
│ │ │ ├── test-utils.cc
│ │ │ ├── test-utils.hh
│ │ │ └── utils.cc
│ │ ├── eos-fusex-git-annex
│ │ ├── eos-fusex-recovery
│ │ ├── eos-test-fusex-messaging
│ │ ├── eos-test-fusex-producer-consumer
│ │ ├── interval-tree.cc
│ │ ├── ioverify.cc
│ │ ├── journal-cache.cc
│ │ ├── lru-test.cc
│ │ ├── rb-tree.cc
│ │ ├── rocks-kv.cc
│ │ └── stress/
│ │ └── xrdcl-proxy.cc
│ └── tsan/
│ └── suppressions.tsan
├── genversion.sh
├── git/
│ └── bin/
│ └── enable-hooks.sh
├── gitlab-ci/
│ ├── .gitlab-ci-build-macos.yml
│ ├── .gitlab-ci-build-ubuntu.yml
│ ├── .gitlab-ci-test-dock_include.yml
│ ├── .gitlab-ci-test-helm-server-multigroup-values.yml
│ ├── .gitlab-ci-test-helm_fusex_values.yml
│ ├── .gitlab-ci-test-helm_include.yml
│ ├── .gitlab-ci-test-helm_kuberos_values.yml
│ ├── .gitlab-ci-test-helm_server_values.yml
│ ├── .gitlab-ci-test-k8s_include.yml
│ ├── after_script_docker_test.sh
│ ├── after_script_k8s_test.sh
│ ├── before_script_docker_test.sh
│ ├── before_script_k8s_test.sh
│ ├── export_codename.sh
│ ├── export_commit-type.sh
│ ├── generate_debian_metadata.sh
│ ├── prebuild_OSbase/
│ │ ├── prebuild-cc7.Dockerfile
│ │ ├── prebuild-cc7_exotic.Dockerfile
│ │ ├── prebuild-el10.Dockerfile
│ │ ├── prebuild-el8.Dockerfile
│ │ ├── prebuild-el9-arm64.Dockerfile
│ │ └── prebuild-el9.Dockerfile
│ ├── publish_deb.sh
│ ├── remove_old_artifacts.sh
│ ├── remove_old_artifacts_debian.sh
│ ├── setup_ccache.sh
│ ├── setup_ccache_deb.sh
│ ├── setup_ccache_fc.sh
│ ├── sign_debian_repository.sh
│ ├── store_artifacts.sh
│ ├── store_artifacts_debian.sh
│ ├── store_stable_artifacts.sh
│ └── utilities_func_for_tests.sh
├── icons/
│ └── EOS.icns
├── man/
│ ├── CMakeLists.txt
│ ├── README.md
│ ├── create_eos_cmds.pl
│ └── create_man.sh
├── mgm/
│ ├── #Iostat.cc#
│ ├── CMakeLists.txt
│ ├── CtaUtils.cc
│ ├── CtaUtils.hh
│ ├── EosCtaReporter.cc
│ ├── EosCtaReporter.hh
│ ├── FuseServer/
│ │ ├── Caps.cc
│ │ ├── Caps.hh
│ │ ├── Clients.cc
│ │ ├── Clients.hh
│ │ ├── Flush.cc
│ │ ├── Flush.hh
│ │ ├── FusexCastBatch.hh
│ │ ├── Locks.cc
│ │ ├── Locks.hh
│ │ ├── Namespace.hh
│ │ ├── Server.cc
│ │ └── Server.hh
│ ├── Namespace.hh
│ ├── README.md
│ ├── access/
│ │ ├── Access.cc
│ │ └── Access.hh
│ ├── acl/
│ │ ├── Acl.cc
│ │ ├── Acl.hh
│ │ └── README.md
│ ├── adminsocket/
│ │ ├── AdminSocket.cc
│ │ └── AdminSocket.hh
│ ├── auth/
│ │ ├── AccessChecker.cc
│ │ └── AccessChecker.hh
│ ├── authz/
│ │ ├── XrdMgmAuthz.cc
│ │ └── XrdMgmAuthz.hh
│ ├── balancer/
│ │ ├── FsBalancer.cc
│ │ ├── FsBalancer.hh
│ │ ├── FsBalancerStats.cc
│ │ └── FsBalancerStats.hh
│ ├── bulk-request/
│ │ ├── BulkRequest.cc
│ │ ├── BulkRequest.hh
│ │ ├── BulkRequestFactory.cc
│ │ ├── BulkRequestFactory.hh
│ │ ├── BulkRequestHelper.hh
│ │ ├── File.hh
│ │ ├── FileCollection.hh
│ │ ├── README.md
│ │ ├── business/
│ │ │ ├── BulkRequestBusiness.cc
│ │ │ └── BulkRequestBusiness.hh
│ │ ├── dao/
│ │ │ ├── IBulkRequestDAO.hh
│ │ │ ├── factories/
│ │ │ │ ├── AbstractDAOFactory.hh
│ │ │ │ ├── ProcDirectoryDAOFactory.cc
│ │ │ │ └── ProcDirectoryDAOFactory.hh
│ │ │ └── proc/
│ │ │ ├── ProcDirBulkRequestFile.cc
│ │ │ ├── ProcDirBulkRequestFile.hh
│ │ │ ├── ProcDirectoryBulkRequestDAO.cc
│ │ │ ├── ProcDirectoryBulkRequestDAO.hh
│ │ │ ├── ProcDirectoryBulkRequestLocations.cc
│ │ │ ├── ProcDirectoryBulkRequestLocations.hh
│ │ │ └── cleaner/
│ │ │ ├── BulkRequestProcCleaner.cc
│ │ │ ├── BulkRequestProcCleaner.hh
│ │ │ ├── BulkRequestProcCleanerConfig.cc
│ │ │ └── BulkRequestProcCleanerConfig.hh
│ │ ├── exception/
│ │ │ ├── BulkRequestException.hh
│ │ │ └── PersistencyException.hh
│ │ ├── interface/
│ │ │ ├── IMgmFileSystemInterface.hh
│ │ │ ├── RealMgmFileSystemInterface.cc
│ │ │ └── RealMgmFileSystemInterface.hh
│ │ ├── prepare/
│ │ │ ├── CancellationBulkRequest.hh
│ │ │ ├── EvictBulkRequest.hh
│ │ │ ├── PrepareUtils.cc
│ │ │ ├── PrepareUtils.hh
│ │ │ ├── StageBulkRequest.hh
│ │ │ ├── manager/
│ │ │ │ ├── BulkRequestPrepareManager.cc
│ │ │ │ ├── BulkRequestPrepareManager.hh
│ │ │ │ ├── PrepareManager.cc
│ │ │ │ └── PrepareManager.hh
│ │ │ └── query-prepare/
│ │ │ ├── QueryPrepareResult.cc
│ │ │ └── QueryPrepareResult.hh
│ │ ├── response/
│ │ │ └── QueryPrepareResponse.hh
│ │ └── utils/
│ │ ├── PrepareArgumentsWrapper.hh
│ │ └── json/
│ │ └── QueryPrepareResponseJson.hh
│ ├── commandmap/
│ │ ├── CommandMap.cc
│ │ └── CommandMap.hh
│ ├── config/
│ │ ├── IConfigEngine.cc
│ │ ├── IConfigEngine.hh
│ │ ├── QuarkConfigHandler.cc
│ │ ├── QuarkConfigHandler.hh
│ │ ├── QuarkDBConfigEngine.cc
│ │ ├── QuarkDBConfigEngine.hh
│ │ └── eos-config-inspect.cc
│ ├── convert/
│ │ ├── ConversionInfo.cc
│ │ ├── ConversionInfo.hh
│ │ ├── ConversionJob.cc
│ │ ├── ConversionJob.hh
│ │ ├── ConversionTag.hh
│ │ ├── ConverterEngine.cc
│ │ └── ConverterEngine.hh
│ ├── devices/
│ │ ├── Devices.cc
│ │ └── Devices.hh
│ ├── drain/
│ │ ├── DrainFs.cc
│ │ ├── DrainFs.hh
│ │ ├── DrainTransferJob.cc
│ │ ├── DrainTransferJob.hh
│ │ ├── Drainer.cc
│ │ └── Drainer.hh
│ ├── egroup/
│ │ ├── Egroup.cc
│ │ └── Egroup.hh
│ ├── eos-repair-tool
│ ├── features/
│ │ ├── Features.cc
│ │ └── Features.hh
│ ├── filesystem/
│ │ ├── FileSystem.cc
│ │ └── FileSystem.hh
│ ├── fsck/
│ │ ├── Fsck.cc
│ │ ├── Fsck.hh
│ │ ├── FsckEntry.cc
│ │ └── FsckEntry.hh
│ ├── fsview/
│ │ ├── FsView.cc
│ │ └── FsView.hh
│ ├── fuse-locks/
│ │ ├── LockTracker.cc
│ │ └── LockTracker.hh
│ ├── geobalancer/
│ │ ├── GeoBalancer.cc
│ │ └── GeoBalancer.hh
│ ├── geotree/
│ │ ├── SchedulingFastTree.hh
│ │ ├── SchedulingSlowTree.cc
│ │ ├── SchedulingSlowTree.hh
│ │ ├── SchedulingTreeCommon.cc
│ │ ├── SchedulingTreeCommon.hh
│ │ ├── SchedulingTreeTest.cc
│ │ └── SchedulingTreeTest.cc.testfile
│ ├── geotreeengine/
│ │ ├── GeoTreeEngine.cc
│ │ └── GeoTreeEngine.hh
│ ├── groupbalancer/
│ │ ├── BalancerEngine.cc
│ │ ├── BalancerEngine.hh
│ │ ├── BalancerEngineFactory.hh
│ │ ├── BalancerEngineTypes.hh
│ │ ├── BalancerEngineUtils.hh
│ │ ├── ConverterUtils.cc
│ │ ├── ConverterUtils.hh
│ │ ├── FreeSpaceBalancerEngine.cc
│ │ ├── FreeSpaceBalancerEngine.hh
│ │ ├── GroupBalancer.cc
│ │ ├── GroupBalancer.hh
│ │ ├── GroupsInfoFetcher.cc
│ │ ├── GroupsInfoFetcher.hh
│ │ ├── MinMaxBalancerEngine.cc
│ │ ├── MinMaxBalancerEngine.hh
│ │ ├── StdDevBalancerEngine.cc
│ │ ├── StdDevBalancerEngine.hh
│ │ ├── StdDrainerEngine.cc
│ │ └── StdDrainerEngine.hh
│ ├── groupdrainer/
│ │ ├── DrainProgressTracker.cc
│ │ ├── DrainProgressTracker.hh
│ │ ├── GroupDrainer.cc
│ │ ├── GroupDrainer.hh
│ │ └── RetryTracker.hh
│ ├── grpc/
│ │ ├── GrpcNsInterface.cc
│ │ ├── GrpcNsInterface.hh
│ │ ├── GrpcRestGwInterface.cc
│ │ ├── GrpcRestGwInterface.hh
│ │ ├── GrpcRestGwServer.cc
│ │ ├── GrpcRestGwServer.hh
│ │ ├── GrpcServer.cc
│ │ ├── GrpcServer.hh
│ │ ├── GrpcWncInterface.cc
│ │ ├── GrpcWncInterface.hh
│ │ ├── GrpcWncServer.cc
│ │ └── GrpcWncServer.hh
│ ├── http/
│ │ ├── HttpHandler.cc
│ │ ├── HttpHandler.hh
│ │ ├── HttpServer.cc
│ │ ├── HttpServer.hh
│ │ ├── ProtocolHandlerFactory.hh
│ │ ├── rapidxml/
│ │ │ ├── license.txt
│ │ │ ├── rapidxml.hpp
│ │ │ ├── rapidxml_print.hpp
│ │ │ └── rapidxml_utils.hpp
│ │ ├── rest-api/
│ │ │ ├── Constants.hh
│ │ │ ├── README.md
│ │ │ ├── action/
│ │ │ │ ├── Action.hh
│ │ │ │ └── tape/
│ │ │ │ ├── TapeAction.hh
│ │ │ │ ├── TapeActions.hh
│ │ │ │ ├── archiveinfo/
│ │ │ │ │ ├── GetArchiveInfo.cc
│ │ │ │ │ └── GetArchiveInfo.hh
│ │ │ │ ├── release/
│ │ │ │ │ ├── CreateReleaseBulkRequest.cc
│ │ │ │ │ └── CreateReleaseBulkRequest.hh
│ │ │ │ └── stage/
│ │ │ │ ├── CancelStageBulkRequest.cc
│ │ │ │ ├── CancelStageBulkRequest.hh
│ │ │ │ ├── CreateStageBulkRequest.cc
│ │ │ │ ├── CreateStageBulkRequest.hh
│ │ │ │ ├── DeleteStageBulkRequest.cc
│ │ │ │ ├── DeleteStageBulkRequest.hh
│ │ │ │ ├── GetStageBulkRequest.cc
│ │ │ │ └── GetStageBulkRequest.hh
│ │ │ ├── business/
│ │ │ │ └── tape/
│ │ │ │ ├── ITapeRestApiBusiness.hh
│ │ │ │ ├── TapeRestApiBusiness.cc
│ │ │ │ └── TapeRestApiBusiness.hh
│ │ │ ├── config/
│ │ │ │ └── tape/
│ │ │ │ ├── TapeRestApiConfig.cc
│ │ │ │ └── TapeRestApiConfig.hh
│ │ │ ├── exception/
│ │ │ │ ├── Exceptions.hh
│ │ │ │ ├── JsonValidationException.hh
│ │ │ │ └── RestException.hh
│ │ │ ├── handler/
│ │ │ │ ├── RestHandler.cc
│ │ │ │ ├── RestHandler.hh
│ │ │ │ ├── tape/
│ │ │ │ │ ├── TapeRestHandler.cc
│ │ │ │ │ └── TapeRestHandler.hh
│ │ │ │ └── wellknown/
│ │ │ │ ├── WellKnownHandler.cc
│ │ │ │ └── WellKnownHandler.hh
│ │ │ ├── json/
│ │ │ │ ├── builder/
│ │ │ │ │ ├── JsonModelBuilder.hh
│ │ │ │ │ ├── ValidationError.hh
│ │ │ │ │ └── jsoncpp/
│ │ │ │ │ ├── JsonCppModelBuilder.hh
│ │ │ │ │ └── JsonCppValidator.hh
│ │ │ │ └── tape/
│ │ │ │ ├── TapeJsonifiers.hh
│ │ │ │ ├── TapeModelBuilders.hh
│ │ │ │ ├── TapeRestApiJsonifier.hh
│ │ │ │ └── model-builders/
│ │ │ │ └── validators/
│ │ │ │ └── TapeJsonCppValidator.hh
│ │ │ ├── manager/
│ │ │ │ ├── RestApiManager.cc
│ │ │ │ └── RestApiManager.hh
│ │ │ ├── model/
│ │ │ │ ├── tape/
│ │ │ │ │ ├── archiveinfo/
│ │ │ │ │ │ └── GetArchiveInfoResponseModel.hh
│ │ │ │ │ ├── common/
│ │ │ │ │ │ ├── ErrorModel.cc
│ │ │ │ │ │ ├── ErrorModel.hh
│ │ │ │ │ │ └── FilesContainer.hh
│ │ │ │ │ └── stage/
│ │ │ │ │ ├── CreateStageBulkRequestModel.hh
│ │ │ │ │ ├── CreatedStageBulkRequestResponseModel.hh
│ │ │ │ │ ├── GetStageBulkRequestResponseModel.hh
│ │ │ │ │ └── PathsModel.hh
│ │ │ │ └── wellknown/
│ │ │ │ └── tape/
│ │ │ │ └── GetTapeWellKnownModel.hh
│ │ │ ├── response/
│ │ │ │ ├── ErrorHandling.hh
│ │ │ │ ├── RestApiResponse.hh
│ │ │ │ ├── RestApiResponseFactory.hh
│ │ │ │ ├── RestResponseFactory.cc
│ │ │ │ └── RestResponseFactory.hh
│ │ │ ├── router/
│ │ │ │ └── Router.hh
│ │ │ ├── utils/
│ │ │ │ ├── URLBuilder.cc
│ │ │ │ ├── URLBuilder.hh
│ │ │ │ ├── URLParser.cc
│ │ │ │ └── URLParser.hh
│ │ │ └── wellknown/
│ │ │ └── tape/
│ │ │ ├── TapeRestApiEndpoint.cc
│ │ │ ├── TapeRestApiEndpoint.hh
│ │ │ ├── TapeWellKnownInfos.cc
│ │ │ └── TapeWellKnownInfos.hh
│ │ ├── s3/
│ │ │ ├── S3Handler.cc
│ │ │ ├── S3Handler.hh
│ │ │ ├── S3Store.cc
│ │ │ └── S3Store.hh
│ │ ├── webdav/
│ │ │ ├── LockResponse.cc
│ │ │ ├── LockResponse.hh
│ │ │ ├── PropFindResponse.cc
│ │ │ ├── PropFindResponse.hh
│ │ │ ├── PropPatchResponse.cc
│ │ │ ├── PropPatchResponse.hh
│ │ │ ├── WebDAVHandler.cc
│ │ │ ├── WebDAVHandler.hh
│ │ │ ├── WebDAVResponse.cc
│ │ │ └── WebDAVResponse.hh
│ │ └── xrdhttp/
│ │ ├── EosMgmHttpHandler.cc
│ │ ├── EosMgmHttpHandler.hh
│ │ └── README.md
│ ├── imaster/
│ │ ├── IMaster.cc
│ │ └── IMaster.hh
│ ├── inflighttracker/
│ │ ├── InFlightTracker.cc
│ │ └── InFlightTracker.hh
│ ├── inspector/
│ │ ├── FileInspector.cc
│ │ ├── FileInspector.hh
│ │ ├── FileInspectorStats.cc
│ │ └── FileInspectorStats.hh
│ ├── iostat/
│ │ ├── Iostat.cc
│ │ └── Iostat.hh
│ ├── lru/
│ │ ├── LRU.cc
│ │ └── LRU.hh
│ ├── macros/
│ │ ├── Macros.cc
│ │ └── Macros.hh
│ ├── misc/
│ │ ├── AuditHelpers.hh
│ │ ├── Constants.hh
│ │ └── IdTrackerWithValidity.hh
│ ├── namespacestats/
│ │ ├── NamespaceStats.cc
│ │ └── NamespaceStats.hh
│ ├── ofs/
│ │ ├── XrdMgmOfs.cc
│ │ ├── XrdMgmOfs.hh
│ │ ├── XrdMgmOfsConfigure.cc
│ │ ├── XrdMgmOfsDirectory.cc
│ │ ├── XrdMgmOfsDirectory.hh
│ │ ├── XrdMgmOfsFile.cc
│ │ ├── XrdMgmOfsFile.hh
│ │ ├── XrdMgmOfsSecurity.hh
│ │ ├── XrdMgmOfsTrace.hh
│ │ ├── cmds/
│ │ │ ├── Access.inc
│ │ │ ├── Attr.inc
│ │ │ ├── Auth.inc
│ │ │ ├── Chksum.inc
│ │ │ ├── Chmod.inc
│ │ │ ├── Chown.inc
│ │ │ ├── Coverage.inc
│ │ │ ├── DeleteExternal.inc
│ │ │ ├── DropReplica.inc
│ │ │ ├── ErrorLogListener.inc
│ │ │ ├── Exists.inc
│ │ │ ├── FAttr.inc
│ │ │ ├── Find.inc
│ │ │ ├── FsConfigListener.inc
│ │ │ ├── Fsctl.inc
│ │ │ ├── Link.inc
│ │ │ ├── Mkdir.inc
│ │ │ ├── PathMap.inc
│ │ │ ├── Remdir.inc
│ │ │ ├── Rename.inc
│ │ │ ├── Rm.inc
│ │ │ ├── SharedPath.inc
│ │ │ ├── ShouldRedirect.inc
│ │ │ ├── ShouldRoute.inc
│ │ │ ├── ShouldStall.inc
│ │ │ ├── Shutdown.inc
│ │ │ ├── Stacktrace.inc
│ │ │ ├── Stat.inc
│ │ │ ├── Stripes.inc
│ │ │ ├── Touch.inc
│ │ │ ├── Utimes.inc
│ │ │ └── Version.inc
│ │ └── fsctl/
│ │ ├── Access.cc
│ │ ├── AdjustReplica.cc
│ │ ├── Checksum.cc
│ │ ├── Chmod.cc
│ │ ├── Chown.cc
│ │ ├── Commit.cc
│ │ ├── CommitHelper.cc
│ │ ├── CommitHelper.hh
│ │ ├── Drop.cc
│ │ ├── Event.cc
│ │ ├── Fusex.cc
│ │ ├── GetFusex.cc
│ │ ├── Getfmd.cc
│ │ ├── Mkdir.cc
│ │ ├── Open.cc
│ │ ├── Readlink.cc
│ │ ├── Redirect.cc
│ │ ├── Stat.cc
│ │ ├── Statvfs.cc
│ │ ├── Symlink.cc
│ │ ├── Utimes.cc
│ │ └── Version.cc
│ ├── pathrouting/
│ │ ├── PathRouting.cc
│ │ └── PathRouting.hh
│ ├── placement/
│ │ ├── ClusterDataTypes.hh
│ │ ├── ClusterMap.cc
│ │ ├── ClusterMap.hh
│ │ ├── FlatScheduler.cc
│ │ ├── FlatScheduler.hh
│ │ ├── FsScheduler.cc
│ │ ├── FsScheduler.hh
│ │ ├── PlacementStrategy.cc
│ │ ├── PlacementStrategy.hh
│ │ ├── RRSeed.hh
│ │ ├── RoundRobinPlacementStrategy.cc
│ │ ├── RoundRobinPlacementStrategy.hh
│ │ ├── ThreadLocalRRSeed.cc
│ │ ├── ThreadLocalRRSeed.hh
│ │ ├── WeightedRandomStrategy.cc
│ │ ├── WeightedRandomStrategy.hh
│ │ ├── WeightedRoundRobinStrategy.cc
│ │ └── WeightedRoundRobinStrategy.hh
│ ├── policy/
│ │ ├── Policy.cc
│ │ └── Policy.hh
│ ├── proc/
│ │ ├── IProcCommand.cc
│ │ ├── IProcCommand.hh
│ │ ├── ProcCommand.cc
│ │ ├── ProcCommand.hh
│ │ ├── ProcInterface.cc
│ │ ├── ProcInterface.hh
│ │ ├── admin/
│ │ │ ├── Access.cc
│ │ │ ├── AccessCmd.cc
│ │ │ ├── AccessCmd.hh
│ │ │ ├── Backup.cc
│ │ │ ├── Backup.hh
│ │ │ ├── ConfigCmd.cc
│ │ │ ├── ConfigCmd.hh
│ │ │ ├── ConvertCmd.cc
│ │ │ ├── ConvertCmd.hh
│ │ │ ├── DebugCmd.cc
│ │ │ ├── DebugCmd.hh
│ │ │ ├── DevicesCmd.cc
│ │ │ ├── DevicesCmd.hh
│ │ │ ├── EvictCmd.cc
│ │ │ ├── EvictCmd.hh
│ │ │ ├── FileRegisterCmd.cc
│ │ │ ├── FileRegisterCmd.hh
│ │ │ ├── FsCmd.cc
│ │ │ ├── FsCmd.hh
│ │ │ ├── FsckCmd.cc
│ │ │ ├── FsckCmd.hh
│ │ │ ├── Fusex.cc
│ │ │ ├── GeoSched.cc
│ │ │ ├── GroupCmd.cc
│ │ │ ├── GroupCmd.hh
│ │ │ ├── IoCmd.cc
│ │ │ ├── IoCmd.hh
│ │ │ ├── IoShapingCmd.cc
│ │ │ ├── NodeCmd.cc
│ │ │ ├── NodeCmd.hh
│ │ │ ├── NsCmd.cc
│ │ │ ├── NsCmd.hh
│ │ │ ├── Quota.cc
│ │ │ ├── QuotaCmd.cc
│ │ │ ├── QuotaCmd.hh
│ │ │ ├── Rtlog.cc
│ │ │ ├── SchedCmd.cc
│ │ │ ├── SchedCmd.hh
│ │ │ ├── SpaceCmd.cc
│ │ │ ├── SpaceCmd.hh
│ │ │ └── Vid.cc
│ │ ├── proc_fs.cc
│ │ ├── proc_fs.hh
│ │ └── user/
│ │ ├── Accounting.cc
│ │ ├── AclCmd.cc
│ │ ├── AclCmd.hh
│ │ ├── Archive.cc
│ │ ├── Attr.cc
│ │ ├── Cd.cc
│ │ ├── Chmod.cc
│ │ ├── Chown.cc
│ │ ├── DfCmd.cc
│ │ ├── DfCmd.hh
│ │ ├── File.cc
│ │ ├── Fileinfo.cc
│ │ ├── Find.cc
│ │ ├── Fuse.cc
│ │ ├── FuseX.cc
│ │ ├── Ls.cc
│ │ ├── Map.cc
│ │ ├── Member.cc
│ │ ├── Mkdir.cc
│ │ ├── Motd.cc
│ │ ├── NewfindCmd.cc
│ │ ├── NewfindCmd.hh
│ │ ├── Quota.cc
│ │ ├── RecycleCmd.cc
│ │ ├── RecycleCmd.hh
│ │ ├── Rm.cc
│ │ ├── RmCmd.cc
│ │ ├── RmCmd.hh
│ │ ├── Rmdir.cc
│ │ ├── RouteCmd.cc
│ │ ├── RouteCmd.hh
│ │ ├── TokenCmd.cc
│ │ ├── TokenCmd.hh
│ │ ├── Version.cc
│ │ ├── Who.cc
│ │ └── Whoami.cc
│ ├── qdbmaster/
│ │ ├── QdbMaster.cc
│ │ └── QdbMaster.hh
│ ├── quota/
│ │ ├── #Quota.cc#
│ │ ├── Quota.cc
│ │ └── Quota.hh
│ ├── recycle/
│ │ ├── Recycle.cc
│ │ ├── Recycle.hh
│ │ ├── RecycleEntry.cc
│ │ ├── RecycleEntry.hh
│ │ ├── RecyclePolicy.cc
│ │ └── RecyclePolicy.hh
│ ├── routeendpoint/
│ │ ├── RouteEndpoint.cc
│ │ └── RouteEndpoint.hh
│ ├── scheduler/
│ │ ├── Scheduler.cc
│ │ └── Scheduler.hh
│ ├── shaping/
│ │ ├── TrafficShaping.cc
│ │ └── TrafficShaping.hh
│ ├── stat/
│ │ ├── Stat.cc
│ │ └── Stat.hh
│ ├── tgc/
│ │ ├── AsyncResult.hh
│ │ ├── AsyncUint64ShellCmd.cc
│ │ ├── AsyncUint64ShellCmd.hh
│ │ ├── BlockingFlag.hh
│ │ ├── CachedValue.hh
│ │ ├── Constants.hh
│ │ ├── DummyClock.hh
│ │ ├── DummyTapeGcMgm.cc
│ │ ├── DummyTapeGcMgm.hh
│ │ ├── FreedBytesHistogram.cc
│ │ ├── FreedBytesHistogram.hh
│ │ ├── IClock.cc
│ │ ├── IClock.hh
│ │ ├── ITapeGcMgm.cc
│ │ ├── ITapeGcMgm.hh
│ │ ├── Lru.cc
│ │ ├── Lru.hh
│ │ ├── MaxLenExceeded.cc
│ │ ├── MaxLenExceeded.hh
│ │ ├── MultiSpaceTapeGc.cc
│ │ ├── MultiSpaceTapeGc.hh
│ │ ├── RealClock.cc
│ │ ├── RealClock.hh
│ │ ├── RealTapeGcMgm.cc
│ │ ├── RealTapeGcMgm.hh
│ │ ├── SmartSpaceStats.cc
│ │ ├── SmartSpaceStats.hh
│ │ ├── SpaceConfig.hh
│ │ ├── SpaceNotFound.cc
│ │ ├── SpaceNotFound.hh
│ │ ├── SpaceStats.hh
│ │ ├── SpaceToTapeGcMap.cc
│ │ ├── SpaceToTapeGcMap.hh
│ │ ├── TapeGc.cc
│ │ ├── TapeGc.hh
│ │ ├── TapeGcStats.hh
│ │ └── TestingTapeGc.hh
│ ├── tracker/
│ │ ├── ReplicationTracker.cc
│ │ └── ReplicationTracker.hh
│ ├── utils/
│ │ ├── AttrHelper.cc
│ │ ├── AttrHelper.hh
│ │ ├── FileSystemRegistry.cc
│ │ ├── FileSystemRegistry.hh
│ │ ├── FileSystemStatusUtils.cc
│ │ ├── FileSystemStatusUtils.hh
│ │ ├── FilesystemUuidMapper.cc
│ │ └── FilesystemUuidMapper.hh
│ ├── vid/
│ │ ├── Vid.cc
│ │ └── Vid.hh
│ ├── wfe/
│ │ ├── WFE.cc
│ │ └── WFE.hh
│ ├── wfe.proto
│ ├── workflow/
│ │ ├── Workflow.cc
│ │ └── Workflow.hh
│ ├── xattr/
│ │ ├── XattrLock.hh
│ │ └── XattrSet.hh
│ └── zmq/
│ ├── ZMQ.cc
│ └── ZMQ.hh
├── misc/
│ ├── CMakeLists.txt
│ ├── cmake/
│ │ ├── cmake-3.15.5-Linux-x86_64.sh
│ │ └── cmake-3.19.7-Linux-x86_64.sh
│ ├── egi/
│ │ ├── CMakeLists.txt
│ │ ├── eos-info-provider.py
│ │ └── eos-star-accounting.py
│ ├── etc/
│ │ ├── CMakeLists.txt
│ │ ├── auto.cfsd
│ │ ├── auto.master.d/
│ │ │ └── cfsd.autofs
│ │ ├── bash_completion.d/
│ │ │ └── eos
│ │ ├── cron.d/
│ │ │ ├── eos-health
│ │ │ ├── eos-logs
│ │ │ ├── eos-mgm-monitoring
│ │ │ ├── eos-reports
│ │ │ └── xrd-alive
│ │ ├── eos/
│ │ │ ├── cfsd/
│ │ │ │ └── eoscfsd.conf
│ │ │ └── config/
│ │ │ ├── fst/
│ │ │ │ └── fst
│ │ │ ├── generic/
│ │ │ │ └── all
│ │ │ ├── mgm/
│ │ │ │ ├── auth
│ │ │ │ ├── mgm
│ │ │ │ └── mgm.modules
│ │ │ ├── modules/
│ │ │ │ └── alice
│ │ │ └── qdb/
│ │ │ └── qdb
│ │ ├── eos.client.keytab
│ │ ├── eos.keytab
│ │ ├── fuse.conf
│ │ ├── fuse.conf.eos
│ │ ├── logrotate.d/
│ │ │ ├── eos-fuse-logs
│ │ │ ├── eos-fusex-logs
│ │ │ └── eos-logs
│ │ ├── profile.d/
│ │ │ └── eos-completion.sh
│ │ ├── sysconfig/
│ │ │ └── eos_env.example
│ │ ├── systemd/
│ │ │ └── system/
│ │ │ ├── eos.service
│ │ │ ├── eos.target
│ │ │ ├── eos5-fst@.service
│ │ │ ├── eos5-mgm@.service
│ │ │ ├── eos5-qdb@.service
│ │ │ ├── eos5.service
│ │ │ ├── eos@.service
│ │ │ ├── eos@.socket
│ │ │ ├── eos@master.service
│ │ │ └── eos@slave.service
│ │ ├── xrd.cf.auth
│ │ ├── xrd.cf.fed
│ │ ├── xrd.cf.fst
│ │ ├── xrd.cf.mgm
│ │ ├── xrd.cf.prefix
│ │ ├── xrd.cf.quarkdb
│ │ ├── xrd.cf.sync
│ │ └── zsh/
│ │ └── site-functions/
│ │ └── _eos
│ ├── sbin/
│ │ ├── CMakeLists.txt
│ │ ├── eos-diagnostic-tool
│ │ ├── eos-inspectorreport
│ │ ├── eos-inspectorstat
│ │ ├── eos-jwk-https
│ │ ├── eos-jwker.readme
│ │ ├── eos-mdreport
│ │ ├── eos-mdstat
│ │ ├── eos-prom-push
│ │ ├── eos-reportstat
│ │ ├── eos-status
│ │ ├── eos_start.sh
│ │ ├── eos_start_pre.sh
│ │ ├── mount.eoscfs
│ │ ├── mount.eosx
│ │ ├── mount.eosx3
│ │ └── umount.fuse
│ ├── selinux/
│ │ ├── CMakeLists.txt
│ │ ├── README.md
│ │ ├── choose_selinux.sh
│ │ ├── eosfuse-7.pp
│ │ └── eosfuse.te
│ ├── usr/
│ │ ├── CMakeLists.txt
│ │ ├── eos-fusex-core.conf
│ │ └── eosd.conf
│ └── var/
│ ├── CMakeLists.txt
│ └── eos/
│ ├── test/
│ │ ├── LeakSanitizer.supp
│ │ └── fuse/
│ │ └── untar/
│ │ ├── untar.tgz
│ │ └── xrootd.tgz
│ └── wfe/
│ └── bash/
│ └── shell
├── namespace/
│ ├── CMakeLists.txt
│ ├── Constants.cc
│ ├── Constants.hh
│ ├── MDException.cc
│ ├── MDException.hh
│ ├── MDLocking.cc
│ ├── MDLocking.hh
│ ├── Namespace.hh
│ ├── PermissionHandler.cc
│ ├── PermissionHandler.hh
│ ├── Prefetcher.cc
│ ├── Prefetcher.hh
│ ├── Resolver.cc
│ ├── Resolver.hh
│ ├── interface/
│ │ ├── ContainerIterators.hh
│ │ ├── IContainerMD.hh
│ │ ├── IContainerMDSvc.hh
│ │ ├── IFileMD.hh
│ │ ├── IFileMDSvc.hh
│ │ ├── IFsView.hh
│ │ ├── INamespaceGroup.hh
│ │ ├── INamespaceStats.hh
│ │ ├── IQuota.hh
│ │ ├── IView.hh
│ │ ├── Identifiers.hh
│ │ ├── LockableNSObject.hh
│ │ └── Misc.hh
│ ├── locking/
│ │ ├── BulkNsObjectLocker.hh
│ │ ├── NSObjectLocker.hh
│ │ └── RawPtr.hh
│ ├── ns_quarkdb/
│ │ ├── CMakeLists.txt
│ │ ├── CacheRefreshListener.cc
│ │ ├── CacheRefreshListener.hh
│ │ ├── ConfigurationParser.hh
│ │ ├── Constants.hh
│ │ ├── ContainerMD.cc
│ │ ├── ContainerMD.hh
│ │ ├── FileMD.cc
│ │ ├── FileMD.hh
│ │ ├── LRU.hh
│ │ ├── NamespaceGroup.cc
│ │ ├── NamespaceGroup.hh
│ │ ├── NsQuarkdbPlugin.cc
│ │ ├── NsQuarkdbPlugin.hh
│ │ ├── QClPerformance.cc
│ │ ├── QClPerformance.hh
│ │ ├── QdbContactDetails.hh
│ │ ├── VersionEnforcement.cc
│ │ ├── VersionEnforcement.hh
│ │ ├── accounting/
│ │ │ ├── ContainerAccounting.cc
│ │ │ ├── ContainerAccounting.hh
│ │ │ ├── FileSystemHandler.cc
│ │ │ ├── FileSystemHandler.hh
│ │ │ ├── FileSystemView.cc
│ │ │ ├── FileSystemView.hh
│ │ │ ├── QuotaNodeCore.cc
│ │ │ ├── QuotaNodeCore.hh
│ │ │ ├── QuotaStats.cc
│ │ │ ├── QuotaStats.hh
│ │ │ ├── SetChangeList.hh
│ │ │ ├── SyncTimeAccounting.cc
│ │ │ └── SyncTimeAccounting.hh
│ │ ├── explorer/
│ │ │ ├── NamespaceExplorer.cc
│ │ │ └── NamespaceExplorer.hh
│ │ ├── flusher/
│ │ │ ├── MetadataFlusher.cc
│ │ │ └── MetadataFlusher.hh
│ │ ├── inspector/
│ │ │ ├── AttributeExtraction.cc
│ │ │ ├── AttributeExtraction.hh
│ │ │ ├── ContainerScanner.cc
│ │ │ ├── ContainerScanner.hh
│ │ │ ├── FileMetadataFilter.cc
│ │ │ ├── FileMetadataFilter.hh
│ │ │ ├── FileScanner.cc
│ │ │ ├── FileScanner.hh
│ │ │ ├── Inspector.cc
│ │ │ ├── Inspector.hh
│ │ │ ├── OutputSink.cc
│ │ │ ├── OutputSink.hh
│ │ │ ├── Printing.cc
│ │ │ └── Printing.hh
│ │ ├── persistency/
│ │ │ ├── ContainerMDSvc.cc
│ │ │ ├── ContainerMDSvc.hh
│ │ │ ├── FileMDSvc.cc
│ │ │ ├── FileMDSvc.hh
│ │ │ ├── FileSystemIterator.cc
│ │ │ ├── FileSystemIterator.hh
│ │ │ ├── MetadataFetcher.cc
│ │ │ ├── MetadataFetcher.hh
│ │ │ ├── MetadataProvider.cc
│ │ │ ├── MetadataProvider.hh
│ │ │ ├── MetadataProviderShard.cc
│ │ │ ├── MetadataProviderShard.hh
│ │ │ ├── NextInodeProvider.cc
│ │ │ ├── NextInodeProvider.hh
│ │ │ ├── RequestBuilder.cc
│ │ │ ├── RequestBuilder.hh
│ │ │ ├── Serialization.cc
│ │ │ ├── Serialization.hh
│ │ │ ├── UnifiedInodeProvider.cc
│ │ │ └── UnifiedInodeProvider.hh
│ │ ├── tests/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── ContainerMDSvcTest.cc
│ │ │ ├── EosNamespaceBenchmark.cc
│ │ │ ├── FileMDSvcTest.cc
│ │ │ ├── FileSystemViewTest.cc
│ │ │ ├── HierarchicalViewTest.cc
│ │ │ ├── LruBenchmark.cc
│ │ │ ├── Main.cc
│ │ │ ├── MetadataFiltering.cc
│ │ │ ├── MetadataTests.cc
│ │ │ ├── MockContainerMD.hh
│ │ │ ├── MockContainerMDSvc.hh
│ │ │ ├── MockFileMDSvc.hh
│ │ │ ├── Namespace.hh
│ │ │ ├── NextInodeProviderTest.cc
│ │ │ ├── NsTests.cc
│ │ │ ├── NsTests.hh
│ │ │ ├── OtherTests.cc
│ │ │ ├── README.md
│ │ │ ├── TestUtils.hh
│ │ │ ├── VariousTests.cc
│ │ │ └── utils/
│ │ │ └── break-file.py
│ │ ├── tools/
│ │ │ ├── EosConvertToLocalityHashes.cc
│ │ │ ├── Fid2PathTool.cc
│ │ │ ├── InodeToFidTool.cc
│ │ │ └── InspectionTool.cc
│ │ ├── utils/
│ │ │ ├── FutureVectorIterator.hh
│ │ │ ├── QuotaRecomputer.cc
│ │ │ └── QuotaRecomputer.hh
│ │ └── views/
│ │ ├── HierarchicalView.cc
│ │ └── HierarchicalView.hh
│ └── utils/
│ ├── Attributes.hh
│ ├── BalanceCalculator.hh
│ ├── Buffer.hh
│ ├── Checksum.hh
│ ├── DataHelper.cc
│ ├── DataHelper.hh
│ ├── Descriptor.cc
│ ├── Descriptor.hh
│ ├── Etag.cc
│ ├── Etag.hh
│ ├── FileListRandomPicker.cc
│ ├── FileListRandomPicker.hh
│ ├── LocalityHint.hh
│ ├── Mode.hh
│ ├── PathProcessor.hh
│ ├── RenameSafetyCheck.hh
│ ├── RmrfHelper.hh
│ ├── Stat.hh
│ └── StringConvertion.hh
├── nginx/
│ ├── README
│ ├── etc/
│ │ ├── init.d/
│ │ │ └── nginx.init
│ │ ├── logrotate.d/
│ │ │ └── nginx.logrotate
│ │ ├── nginx/
│ │ │ └── nginx.eos.conf.template
│ │ ├── sysconfig/
│ │ │ ├── nginx.sysconfig
│ │ │ └── nginx.sysconfig.systemd
│ │ └── systemd/
│ │ └── nginx.service
│ ├── jenkins-build.sh
│ ├── makesrpm.sh
│ ├── nginx-allow-proxy-certs.patch
│ ├── nginx-allow-put-redirect.patch
│ ├── nginx-no-body-before-redirect.patch
│ └── nginx.spec
├── proto/
│ ├── Audit.proto
│ ├── CMakeLists.txt
│ ├── eos_rest_gateway/
│ │ ├── eos_rest_gateway_service.proto
│ │ └── google/
│ │ └── api/
│ │ ├── annotations.proto
│ │ └── http.proto
│ ├── fst/
│ │ ├── Delete.proto
│ │ └── FmdBase.proto
│ └── namespace/
│ └── ns_quarkdb/
│ ├── ChangelogEntry.proto
│ ├── ContainerMd.proto
│ └── FileMd.proto
├── repo/
│ ├── eos-el7-dev.repo
│ ├── eos-el7.repo
│ ├── eos-el8-dev.repo
│ ├── eos-el8.repo
│ ├── eos-el8s-dev.repo
│ ├── eos-el8s.repo
│ ├── eos-el9s-dev.repo
│ └── eos-el9s.repo
├── test/
│ ├── CMakeLists.txt
│ ├── EosChecksumBenchmark.cc
│ ├── EosCryptoTimingTest.cc
│ ├── EosHashBenchmark.cc
│ ├── EosIdMapBenchmark.cc
│ ├── EosLoggingBenchmark.cc
│ ├── EosMmap.cc
│ ├── EosOpenTruncUpdate.cc
│ ├── EosUdpDumper.cc
│ ├── TestHmacSha256.cc
│ ├── ThreadPoolTest.cc
│ ├── XrdCpAbort.cc
│ ├── XrdCpAppend.cc
│ ├── XrdCpAppendOverlap.cc
│ ├── XrdCpBackward.cc
│ ├── XrdCpDownloadRandom.cc
│ ├── XrdCpExtend.cc
│ ├── XrdCpHoles.cc
│ ├── XrdCpNonStreaming.cc
│ ├── XrdCpPartial.cc
│ ├── XrdCpPgRead.cc
│ ├── XrdCpPosixCache.cc
│ ├── XrdCpRandom.cc
│ ├── XrdCpShrink.cc
│ ├── XrdCpSlowWriter.cc
│ ├── XrdCpTruncate.cc
│ ├── XrdCpUpdate.cc
│ ├── XrdStress.cc
│ ├── XrdStress.hh
│ ├── benchmark/
│ │ ├── CMakeLists.txt
│ │ ├── ConfigProto.proto
│ │ ├── Configuration.cc
│ │ ├── Configuration.hh
│ │ ├── DirEos.cc
│ │ ├── DirEos.hh
│ │ ├── FileEos.cc
│ │ ├── FileEos.hh
│ │ ├── Namespace.hh
│ │ ├── ProtoIo.cc
│ │ ├── ProtoIo.hh
│ │ ├── Result.cc
│ │ ├── Result.hh
│ │ ├── ResultProto.proto
│ │ ├── eosbenchmark.cc
│ │ └── eosbenchmark.hh
│ ├── eos-accounting-test
│ ├── eos-acl-concurrent
│ ├── eos-altxs-test
│ ├── eos-backup
│ ├── eos-backup-browser
│ ├── eos-balance-test
│ ├── eos-bash
│ ├── eos-convert-test
│ ├── eos-defaultcc-test
│ ├── eos-drain-test
│ ├── eos-file-cont-detached-test
│ ├── eos-fsck-test
│ ├── eos-fst-close-test
│ ├── eos-groupdrain-test
│ ├── eos-grpc-test
│ ├── eos-http-upload-test
│ ├── eos-https-functional-test
│ ├── eos-instance-test
│ ├── eos-instance-test-ci
│ ├── eos-io-test
│ ├── eos-lru-test
│ ├── eos-macaroon-init
│ ├── eos-manila-test
│ ├── eos-oc-test
│ ├── eos-quota-test
│ ├── eos-rain-test
│ ├── eos-rclone-test
│ ├── eos-recycle-test
│ ├── eos-rename-test
│ ├── eos-squash-test
│ ├── eos-synctime-test
│ ├── eos-test-utils
│ ├── eos-timestamp-test
│ ├── eos-token-test
│ ├── eos-traffic-shaping-test
│ ├── eos_io_tool.cc
│ ├── eoscp-rain-test
│ ├── fuse/
│ │ └── eos-fuse-test
│ ├── fusex/
│ │ ├── eos-fusex-functional-test
│ │ └── eos-test-credential-bindings
│ ├── microbenchmarks/
│ │ ├── CMakeLists.txt
│ │ ├── README.md
│ │ ├── common/
│ │ │ ├── BM_AtomicPtr.cc
│ │ │ ├── BM_IdMap.cc
│ │ │ ├── BM_Random.cc
│ │ │ ├── BM_StringUtils.cc
│ │ │ ├── BM_ThreadId.cc
│ │ │ └── BM_XrdString.cc
│ │ ├── main.cc
│ │ ├── mgm/
│ │ │ ├── BM_FlatScheduler.cc
│ │ │ └── BM_RRSeed.cc
│ │ └── namespace/
│ │ └── ns_quarkdb/
│ │ ├── BM_NSLocking.cc
│ │ └── README.md
│ ├── mq/
│ │ ├── SharedHashLoadTest.cc
│ │ ├── XrdMqClientMaster.cc
│ │ ├── XrdMqClientTest.cc
│ │ ├── XrdMqClientWorker.cc
│ │ ├── XrdMqQueueDumper.cc
│ │ ├── XrdMqQueueFeeder.cc
│ │ ├── XrdMqQueueInjection.cc
│ │ ├── XrdMqSharedObjectBroadCastClient.cc
│ │ ├── XrdMqSharedObjectClient.cc
│ │ └── XrdMqSharedObjectQueueClient.cc
│ ├── test-eos-iam-mapfile.py
│ └── xrdstress
├── test.cmake
├── unit_tests/
│ ├── CMakeLists.txt
│ ├── README.md
│ ├── auth_plugin/
│ │ ├── AuthFsTest.cc
│ │ ├── Namespace.hh
│ │ ├── TestEnv.cc
│ │ └── TestEnv.hh
│ ├── common/
│ │ ├── AuditTests.cc
│ │ ├── BackOffInvokerTests.cc
│ │ ├── BufferManagerTests.cc
│ │ ├── ConcurrentQueueTests.cc
│ │ ├── ConfigStoreTests.cc
│ │ ├── ConfigTests.cc
│ │ ├── ContainerUtilsTests.cc
│ │ ├── CounterTests.cc
│ │ ├── EosTokenTests.cc
│ │ ├── FileIdTests.cc
│ │ ├── FileMapTests.cc
│ │ ├── FutureWrapperTests.cc
│ │ ├── GlobTests.cc
│ │ ├── InodeTests.cc
│ │ ├── LoggingTests.cc
│ │ ├── LoggingTestsUtils.cc
│ │ ├── MappingTestFixture.hh
│ │ ├── MappingTests.cc
│ │ ├── MemConfigStore.hh
│ │ ├── Namespace.hh
│ │ ├── ObserverMgrTests.cc
│ │ ├── PathTests.cc
│ │ ├── RWMutexTest.cc
│ │ ├── RandTests.cc
│ │ ├── RateLimitTests.cc
│ │ ├── RegexWrapperTests.cc
│ │ ├── SciTokensTests.cc
│ │ ├── ShardedCacheTests.cc
│ │ ├── StringConversionTests.cc
│ │ ├── StringSplitTests.cc
│ │ ├── StringTokenizerTests.cc
│ │ ├── StringUtilsTests.cc
│ │ ├── SymKeysTests.cc
│ │ ├── ThreadPoolTest.cc
│ │ ├── TimingTests.cc
│ │ ├── UriCapCipherTests.cc
│ │ ├── UtilsTests.cc
│ │ ├── VariousTests.cc
│ │ ├── WebNotifyTests.cc
│ │ ├── XrdConnPoolTests.cc
│ │ ├── async/
│ │ │ ├── ExecutorMgrTests.cc
│ │ │ ├── FollyExecutorFixture.hh
│ │ │ └── OpaqueFutureTests.cc
│ │ └── concurrency/
│ │ ├── AlignedAtomicArrayTests.cc
│ │ ├── AtomicUniquePtrTests.cc
│ │ ├── RCUTests.cc
│ │ └── ThreadEpochCounterTests.cc
│ ├── console/
│ │ ├── AclCmdTest.cc
│ │ ├── CmdsTests.cc
│ │ ├── ConsoleCompletionTest.cc
│ │ ├── ConsoleUtilTests.cc
│ │ ├── ParseCommentTest.cc
│ │ └── RegexUtilTest.cc
│ ├── fst/
│ │ ├── HealthTest.cc
│ │ ├── HttpHandlerFstFileCacheTests.cc
│ │ ├── LoadTests.cc
│ │ ├── MonitorVarPartitionTest.cc
│ │ ├── Namespace.hh
│ │ ├── NfsIoTests.cc
│ │ ├── ResponseCollectorTests.cc
│ │ ├── ScanDirTests.cc
│ │ ├── TestEnv.cc
│ │ ├── TestEnv.hh
│ │ ├── TmpDirTree.hh
│ │ ├── UtilsTest.cc
│ │ ├── WalkDirTreeTests.cc
│ │ ├── XrdFstOfsFileInternalTest.cc
│ │ ├── XrdFstOfsFileTest.cc
│ │ ├── XrdFstOfsTests.cc
│ │ ├── XrdFstOssFileTest.cc
│ │ ├── XrdIoTests.cc
│ │ └── main_fst.cc
│ ├── fusex/
│ │ └── StatTests.cc
│ ├── mgm/
│ │ ├── AccessTests.cc
│ │ ├── AclCmdTests.cc
│ │ ├── CapsTests.cc
│ │ ├── CommitHelperTests.cc
│ │ ├── ConversionInfoTests.cc
│ │ ├── CtaUtilsTests.cc
│ │ ├── EgroupTests.cc
│ │ ├── FileSystemRegistryTests.cc
│ │ ├── FsViewTests.cc
│ │ ├── FsckEntryTests.cc
│ │ ├── FusexCastBatchTests.cc
│ │ ├── HttpTests.cc
│ │ ├── IdTrackerTests.cc
│ │ ├── IostatTests.cc
│ │ ├── LRUTests.cc
│ │ ├── LockTrackerTests.cc
│ │ ├── PolicyTests.cc
│ │ ├── ProcFsTests.cc
│ │ ├── QuarkDBConfigTests.cc
│ │ ├── RecyclePolicyTests.cc
│ │ ├── RecycleTests.cc
│ │ ├── RoutingTests.cc
│ │ ├── XrdMgmOfsFileTests.cc
│ │ ├── XrdMgmOfsTests.cc
│ │ ├── bulk-request/
│ │ │ ├── BulkRequestPrepareManagerTest.cc
│ │ │ ├── MockPrepareMgmFSInterface.cc
│ │ │ ├── MockPrepareMgmFSInterface.hh
│ │ │ ├── PrepareManagerTest.cc
│ │ │ └── PrepareManagerTest.hh
│ │ ├── groupbalancer/
│ │ │ ├── BalancerEngineTypeTests.cc
│ │ │ ├── FreeSpaceBalancerTests.cc
│ │ │ ├── GroupBalancerUtilsTests.cc
│ │ │ ├── GroupsInfoFetcherTests.cc
│ │ │ ├── MinMaxBalancerEngineTests.cc
│ │ │ ├── StdDevBalancerEngineTests.cc
│ │ │ └── StdDrainerTests.cc
│ │ ├── groupdrainer/
│ │ │ ├── DrainProgressTrackerTests.cc
│ │ │ ├── GroupDrainerRetry.cc
│ │ │ └── GroupDrainerTests.cc
│ │ ├── http/
│ │ │ ├── HttpServerTests.cc
│ │ │ └── rest-api/
│ │ │ └── tape/
│ │ │ ├── JsonCPPTapeModelBuilderTest.cc
│ │ │ ├── JsonCPPTapeModelBuilderTest.hh
│ │ │ ├── RestApiTest.cc
│ │ │ └── RestApiTest.hh
│ │ ├── placement/
│ │ │ ├── ClusterMapFixture.hh
│ │ │ ├── ClusterMapTests.cc
│ │ │ ├── FsSchedulerTests.cc
│ │ │ ├── PlacementStrategyTests.cc
│ │ │ ├── RRSeedTests.cc
│ │ │ ├── SchedulerTests.cc
│ │ │ └── ThreadLocalRRSeedTests.cc
│ │ ├── tgc/
│ │ │ ├── CachedValueTests.cc
│ │ │ ├── FreedBytesHistogramTests.cc
│ │ │ ├── LruTests.cc
│ │ │ ├── MultiSpaceTapeGcTests.cc
│ │ │ ├── SmartSpaceStatsTests.cc
│ │ │ ├── SpaceToTapeGcMapTests.cc
│ │ │ └── TapeGcTests.cc
│ │ └── utils/
│ │ └── AttrHelperTests.cc
│ └── with_qdb/
│ ├── Main.cc
│ ├── TestUtils.cc
│ ├── TestUtils.hh
│ └── configuration.cc
└── utils/
├── CMakeLists.txt
├── README.osx
├── astylerc
├── centos7-dev-environment.sh
├── centos8-dev-environment.sh
├── clang-format-diff.py
├── el7-packages.sh
├── el9-dev-environment.sh
├── eos-cdmi-setup.sh
├── eos-fst-clean
├── eos-log-clean
├── eos-mgm-clean
├── eos-osx-package-prepare.sh
├── eos-osx-package.sh
├── eos-ports-block
├── eos-tty-broadcast
├── eos-uninstall
├── eos-xrootd-install.sh
├── eosx
├── filter-trace/
│ ├── .gitignore
│ ├── eos-filter-stacktrace
│ └── test-eos-filter-stacktrace.py
├── flamegraph/
│ ├── eos-make-flamegraph
│ ├── eos-util-flamegraph
│ └── eos-util-stackcollapse
├── get-xrootd-git-master.sh
├── make-keytab
├── replace-in-sources
├── route-http
└── zstdtail.cc
================================================
FILE CONTENTS
================================================
================================================
FILE: .clang-format
================================================
Language: Cpp
AccessModifierOffset: -2
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignOperands: Align
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: Never
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: AllDefinitions
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: true
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Custom
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 90
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
IncludeCategories:
- Regex: '^"(llvm|llvm-c|clang|clang-c)/'
Priority: 2
- Regex: '^(<|"(gtest|isl|json)/)'
Priority: 3
- Regex: '.*'
Priority: 1
IncludeIsMainRegex: '$'
IndentCaseLabels: false
IndentWidth: 2
IndentWrappedFunctionNames: false
KeepEmptyLinesAtTheStartOfBlocks: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Left
ReflowComments: Always
SortIncludes: true
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: c++17
TabWidth: 8
UseTab: Never
JavaScriptQuotes: Leave
InsertBraces: true
================================================
FILE: .clang-tidy
================================================
---
Checks: 'clang-diagnostic-*,clang-analyzer-*,-clang-analyzer-alpha*,*,-cppcoreguidelines-pro-bounds-pointer-arithmetic,-cert-err61-cpp,-misc-throw-by-value-catch-by-reference,-clang-analyzer-alpha.deadcode.UnreachableCode,-cert-err58-cpp,-clang-analyzer-alpha.*,-clang-analyzer-security.insecureAPI.strcpy,-cppcoreguidelines-pro-type-vararg,-cppcoreguidelines-pro-type-reinterpret-cast,-google-runtime-int,-modernize-raw-string-literal,-cppcoreguidelines-pro-bounds-constant-array-index,-llvmlibc-*'
WarningsAsErrors: ''
HeaderFilterRegex: ''
AnalyzeTemporaryDtors: false
CheckOptions:
- key: cert-dcl59-cpp.HeaderFileExtensions
value: h,hh,hpp,hxx
- key: cert-err61-cpp.CheckThrowTemporaries
value: '1'
- key: cert-oop11-cpp.IncludeStyle
value: llvm
- key: cert-oop11-cpp.UseCERTSemantics
value: '1'
- key: cppcoreguidelines-pro-bounds-constant-array-index.GslHeader
value: ''
- key: cppcoreguidelines-pro-bounds-constant-array-index.IncludeStyle
value: '0'
- key: cppcoreguidelines-pro-type-member-init.IgnoreArrays
value: '0'
- key: google-build-namespaces.HeaderFileExtensions
value: h,hh,hpp,hxx
- key: google-global-names-in-headers.HeaderFileExtensions
value: h
- key: google-readability-braces-around-statements.ShortStatementLines
value: '1'
- key: google-readability-function-size.BranchThreshold
value: '4294967295'
- key: google-readability-function-size.LineThreshold
value: '4294967295'
- key: google-readability-function-size.StatementThreshold
value: '800'
- key: google-readability-namespace-comments.ShortNamespaceLines
value: '10'
- key: google-readability-namespace-comments.SpacesBeforeComments
value: '2'
- key: google-runtime-int.SignedTypePrefix
value: int
- key: google-runtime-int.TypeSuffix
value: ''
- key: google-runtime-int.UnsignedTypePrefix
value: uint
- key: llvm-namespace-comment.ShortNamespaceLines
value: '1'
- key: llvm-namespace-comment.SpacesBeforeComments
value: '1'
- key: misc-assert-side-effect.AssertMacros
value: assert
- key: misc-assert-side-effect.CheckFunctionCalls
value: '0'
- key: misc-dangling-handle.HandleClasses
value: 'std::basic_string_view;std::experimental::basic_string_view'
- key: misc-definitions-in-headers.HeaderFileExtensions
value: ',h,hh,hpp,hxx'
- key: misc-definitions-in-headers.UseHeaderFileExtension
value: '1'
- key: misc-misplaced-widening-cast.CheckImplicitCasts
value: '1'
- key: misc-move-constructor-init.IncludeStyle
value: llvm
- key: misc-move-constructor-init.UseCERTSemantics
value: '0'
- key: misc-sizeof-expression.WarnOnSizeOfCompareToConstant
value: '1'
- key: misc-sizeof-expression.WarnOnSizeOfConstant
value: '1'
- key: misc-sizeof-expression.WarnOnSizeOfThis
value: '1'
- key: misc-string-constructor.LargeLengthThreshold
value: '8388608'
- key: misc-string-constructor.WarnOnLargeLength
value: '1'
- key: misc-suspicious-missing-comma.MaxConcatenatedTokens
value: '5'
- key: misc-suspicious-missing-comma.RatioThreshold
value: '0.200000'
- key: misc-suspicious-missing-comma.SizeThreshold
value: '5'
- key: misc-suspicious-string-compare.StringCompareLikeFunctions
value: ''
- key: misc-suspicious-string-compare.WarnOnImplicitComparison
value: '1'
- key: misc-suspicious-string-compare.WarnOnLogicalNotComparison
value: '0'
- key: misc-throw-by-value-catch-by-reference.CheckThrowTemporaries
value: '1'
- key: modernize-loop-convert.MaxCopySize
value: '16'
- key: modernize-loop-convert.MinConfidence
value: reasonable
- key: modernize-loop-convert.NamingStyle
value: CamelCase
- key: modernize-pass-by-value.IncludeStyle
value: llvm
- key: modernize-replace-auto-ptr.IncludeStyle
value: llvm
- key: modernize-use-nullptr.NullMacros
value: 'NULL'
- key: performance-faster-string-find.StringLikeClasses
value: 'std::basic_string'
- key: performance-for-range-copy.WarnOnAllAutoCopies
value: '0'
- key: readability-braces-around-statements.ShortStatementLines
value: '1'
- key: readability-function-size.BranchThreshold
value: '4294967295'
- key: readability-function-size.LineThreshold
value: '4294967295'
- key: readability-function-size.StatementThreshold
value: '800'
- key: readability-identifier-naming.AbstractClassCase
value: aNy_CasE
- key: readability-identifier-naming.AbstractClassPrefix
value: ''
- key: readability-identifier-naming.AbstractClassSuffix
value: ''
- key: readability-identifier-naming.ClassCase
value: aNy_CasE
- key: readability-identifier-naming.ClassConstantCase
value: aNy_CasE
- key: readability-identifier-naming.ClassConstantPrefix
value: ''
- key: readability-identifier-naming.ClassConstantSuffix
value: ''
- key: readability-identifier-naming.ClassMemberCase
value: aNy_CasE
- key: readability-identifier-naming.ClassMemberPrefix
value: ''
- key: readability-identifier-naming.ClassMemberSuffix
value: ''
- key: readability-identifier-naming.ClassMethodCase
value: aNy_CasE
- key: readability-identifier-naming.ClassMethodPrefix
value: ''
- key: readability-identifier-naming.ClassMethodSuffix
value: ''
- key: readability-identifier-naming.ClassPrefix
value: ''
- key: readability-identifier-naming.ClassSuffix
value: ''
- key: readability-identifier-naming.ConstantCase
value: aNy_CasE
- key: readability-identifier-naming.ConstantMemberCase
value: aNy_CasE
- key: readability-identifier-naming.ConstantMemberPrefix
value: ''
- key: readability-identifier-naming.ConstantMemberSuffix
value: ''
- key: readability-identifier-naming.ConstantParameterCase
value: aNy_CasE
- key: readability-identifier-naming.ConstantParameterPrefix
value: ''
- key: readability-identifier-naming.ConstantParameterSuffix
value: ''
- key: readability-identifier-naming.ConstantPrefix
value: ''
- key: readability-identifier-naming.ConstantSuffix
value: ''
- key: readability-identifier-naming.ConstexprFunctionCase
value: aNy_CasE
- key: readability-identifier-naming.ConstexprFunctionPrefix
value: ''
- key: readability-identifier-naming.ConstexprFunctionSuffix
value: ''
- key: readability-identifier-naming.ConstexprMethodCase
value: aNy_CasE
- key: readability-identifier-naming.ConstexprMethodPrefix
value: ''
- key: readability-identifier-naming.ConstexprMethodSuffix
value: ''
- key: readability-identifier-naming.ConstexprVariableCase
value: aNy_CasE
- key: readability-identifier-naming.ConstexprVariablePrefix
value: ''
- key: readability-identifier-naming.ConstexprVariableSuffix
value: ''
- key: readability-identifier-naming.EnumCase
value: aNy_CasE
- key: readability-identifier-naming.EnumConstantCase
value: aNy_CasE
- key: readability-identifier-naming.EnumConstantPrefix
value: ''
- key: readability-identifier-naming.EnumConstantSuffix
value: ''
- key: readability-identifier-naming.EnumPrefix
value: ''
- key: readability-identifier-naming.EnumSuffix
value: ''
- key: readability-identifier-naming.FunctionCase
value: aNy_CasE
- key: readability-identifier-naming.FunctionPrefix
value: ''
- key: readability-identifier-naming.FunctionSuffix
value: ''
- key: readability-identifier-naming.GlobalConstantCase
value: aNy_CasE
- key: readability-identifier-naming.GlobalConstantPrefix
value: ''
- key: readability-identifier-naming.GlobalConstantSuffix
value: ''
- key: readability-identifier-naming.GlobalFunctionCase
value: aNy_CasE
- key: readability-identifier-naming.GlobalFunctionPrefix
value: ''
- key: readability-identifier-naming.GlobalFunctionSuffix
value: ''
- key: readability-identifier-naming.GlobalVariableCase
value: aNy_CasE
- key: readability-identifier-naming.GlobalVariablePrefix
value: ''
- key: readability-identifier-naming.GlobalVariableSuffix
value: ''
- key: readability-identifier-naming.IgnoreFailedSplit
value: '0'
- key: readability-identifier-naming.InlineNamespaceCase
value: aNy_CasE
- key: readability-identifier-naming.InlineNamespacePrefix
value: ''
- key: readability-identifier-naming.InlineNamespaceSuffix
value: ''
- key: readability-identifier-naming.LocalConstantCase
value: aNy_CasE
- key: readability-identifier-naming.LocalConstantPrefix
value: ''
- key: readability-identifier-naming.LocalConstantSuffix
value: ''
- key: readability-identifier-naming.LocalVariableCase
value: aNy_CasE
- key: readability-identifier-naming.LocalVariablePrefix
value: ''
- key: readability-identifier-naming.LocalVariableSuffix
value: ''
- key: readability-identifier-naming.MemberCase
value: aNy_CasE
- key: readability-identifier-naming.MemberPrefix
value: ''
- key: readability-identifier-naming.MemberSuffix
value: ''
- key: readability-identifier-naming.MethodCase
value: aNy_CasE
- key: readability-identifier-naming.MethodPrefix
value: ''
- key: readability-identifier-naming.MethodSuffix
value: ''
- key: readability-identifier-naming.NamespaceCase
value: aNy_CasE
- key: readability-identifier-naming.NamespacePrefix
value: ''
- key: readability-identifier-naming.NamespaceSuffix
value: ''
- key: readability-identifier-naming.ParameterCase
value: aNy_CasE
- key: readability-identifier-naming.ParameterPackCase
value: aNy_CasE
- key: readability-identifier-naming.ParameterPackPrefix
value: ''
- key: readability-identifier-naming.ParameterPackSuffix
value: ''
- key: readability-identifier-naming.ParameterPrefix
value: ''
- key: readability-identifier-naming.ParameterSuffix
value: ''
- key: readability-identifier-naming.PrivateMemberCase
value: aNy_CasE
- key: readability-identifier-naming.PrivateMemberPrefix
value: ''
- key: readability-identifier-naming.PrivateMemberSuffix
value: ''
- key: readability-identifier-naming.PrivateMethodCase
value: aNy_CasE
- key: readability-identifier-naming.PrivateMethodPrefix
value: ''
- key: readability-identifier-naming.PrivateMethodSuffix
value: ''
- key: readability-identifier-naming.ProtectedMemberCase
value: aNy_CasE
- key: readability-identifier-naming.ProtectedMemberPrefix
value: ''
- key: readability-identifier-naming.ProtectedMemberSuffix
value: ''
- key: readability-identifier-naming.ProtectedMethodCase
value: aNy_CasE
- key: readability-identifier-naming.ProtectedMethodPrefix
value: ''
- key: readability-identifier-naming.ProtectedMethodSuffix
value: ''
- key: readability-identifier-naming.PublicMemberCase
value: aNy_CasE
- key: readability-identifier-naming.PublicMemberPrefix
value: ''
- key: readability-identifier-naming.PublicMemberSuffix
value: ''
- key: readability-identifier-naming.PublicMethodCase
value: aNy_CasE
- key: readability-identifier-naming.PublicMethodPrefix
value: ''
- key: readability-identifier-naming.PublicMethodSuffix
value: ''
- key: readability-identifier-naming.StaticConstantCase
value: aNy_CasE
- key: readability-identifier-naming.StaticConstantPrefix
value: ''
- key: readability-identifier-naming.StaticConstantSuffix
value: ''
- key: readability-identifier-naming.StaticVariableCase
value: aNy_CasE
- key: readability-identifier-naming.StaticVariablePrefix
value: ''
- key: readability-identifier-naming.StaticVariableSuffix
value: ''
- key: readability-identifier-naming.StructCase
value: aNy_CasE
- key: readability-identifier-naming.StructPrefix
value: ''
- key: readability-identifier-naming.StructSuffix
value: ''
- key: readability-identifier-naming.TemplateParameterCase
value: aNy_CasE
- key: readability-identifier-naming.TemplateParameterPrefix
value: ''
- key: readability-identifier-naming.TemplateParameterSuffix
value: ''
- key: readability-identifier-naming.TemplateTemplateParameterCase
value: aNy_CasE
- key: readability-identifier-naming.TemplateTemplateParameterPrefix
value: ''
- key: readability-identifier-naming.TemplateTemplateParameterSuffix
value: ''
- key: readability-identifier-naming.TypeTemplateParameterCase
value: aNy_CasE
- key: readability-identifier-naming.TypeTemplateParameterPrefix
value: ''
- key: readability-identifier-naming.TypeTemplateParameterSuffix
value: ''
- key: readability-identifier-naming.TypedefCase
value: aNy_CasE
- key: readability-identifier-naming.TypedefPrefix
value: ''
- key: readability-identifier-naming.TypedefSuffix
value: ''
- key: readability-identifier-naming.UnionCase
value: aNy_CasE
- key: readability-identifier-naming.UnionPrefix
value: ''
- key: readability-identifier-naming.UnionSuffix
value: ''
- key: readability-identifier-naming.ValueTemplateParameterCase
value: aNy_CasE
- key: readability-identifier-naming.ValueTemplateParameterPrefix
value: ''
- key: readability-identifier-naming.ValueTemplateParameterSuffix
value: ''
- key: readability-identifier-naming.VariableCase
value: aNy_CasE
- key: readability-identifier-naming.VariablePrefix
value: ''
- key: readability-identifier-naming.VariableSuffix
value: ''
- key: readability-identifier-naming.VirtualMethodCase
value: aNy_CasE
- key: readability-identifier-naming.VirtualMethodPrefix
value: ''
- key: readability-identifier-naming.VirtualMethodSuffix
value: ''
- key: readability-simplify-boolean-expr.ChainedConditionalAssignment
value: '0'
- key: readability-simplify-boolean-expr.ChainedConditionalReturn
value: '0'
================================================
FILE: .codeclimate.yml
================================================
plugins:
cppcheck:
enabled: true
================================================
FILE: .ctest/config.cmake
================================================
# This file is meant to contain set commands for options
# you'd normally set at configuration when calling CMake.
# For example, to compile with C++20, uncomment the line below
# set(CMAKE_CXX_STANDARD 20 CACHE STRING "C++Standard")
================================================
FILE: .git-blame-ignore-revs
================================================
# This file contains a list of commits that should be ignored by some git commands including `git blame`.
# You may need to enable this feature in your git configuration by running:
# - git config blame.ignoreRevsFile .git-blame-ignore-revs
# Web interfaces of git such as GitHub and GitLab also support this feature.
# Commits that do not add or change any functionality but only reformat the code or fix typos should be added to this file. This way, when you run `git blame` on a file, you won't see these commits and can focus on the commits that actually changed the code.
# The following command can be run to check if any of the commits in this file are missing from the history:
# - for rev in $(grep -vE '^#|^$' .git-blame-ignore-revs); do git rev-parse -q --verify "${rev}^{commit}" >/dev/null || echo "Missing: $rev"; done
fd828b6155b3caf28676b4568065c0911529618b
f4b1931dd80ca8708b7802080eb468d5f7a19bda
683def495afe2567b108fb0de9ad8d3eb1ec43c8
================================================
FILE: .gitignore
================================================
*.lo
*.o
*.la
*.pyc
.libs
.deps
Makefile
Makefile.in
Console/eos
console/iam.cfg
XrdMqOfs/xrdmqclientmaster
XrdMqOfs/xrdmqclienttest
XrdMqOfs/xrdmqclientworker
XrdMqOfs/xrdmqcryptotest
XrdMqOfs/xrdmqdumper
aclocal.m4
autom4te.cache
config.guess
config.log
config.status
config.sub
configure
depcomp
eos.spec
install-sh
libtool
ltmain.sh
missing
doxy.log
doxydoc
*~
Namespace/tests/text_runner
valgrind.supp
eos-log-repair
/build*/
__pycache__
kineticio-dist.tgz
# clion specific configs
/cmake-build*/
/.idea/
# eclipse specific configs
/.settings/
.cproject
.project
.vscode
nbproject
my_clang_cache.cmake
# ccache specific configs
/ccache/
# documentation artifacts
doc/_build/
doc/html/
debian/control
# ApMon specific
ApMon/*.tar.gz
ApMon/rpmbuild*
ApMon/eos-apmon-*
!ApMon/Makefile
# clangd file
compile_commands.json
.cache
================================================
FILE: .gitlab-ci.yml
================================================
# ************************************************************************
# * EOS - the CERN Disk Storage System *
# * Copyright (C) 2023 CERN/Switzerland *
# * *
# * This program is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation, either version 3 of the License, or *
# * (at your option) any later version. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program. If not, see <http://www.gnu.org/licenses/>.*
# ************************************************************************
stages:
- build:manual
- pre-commit
- prebuild
- build:rpm
- build:dockerimage
- test
- publish
- clean
variables:
CODENAME: "diopside"
default:
interruptible: true
before_script:
- source gitlab-ci/export_commit-type.sh
- echo "Exporting COMMIT_TYPE=${COMMIT_TYPE}"
include:
# - template: Code-Quality.gitlab-ci.yml
# - local: /gitlab-ci/.gitlab-ci-test-dock_include.yml @note on the file
# - local: /gitlab-ci/.gitlab-ci-build-macos.yml
- local: /gitlab-ci/.gitlab-ci-build-ubuntu.yml
- local: /gitlab-ci/.gitlab-ci-test-k8s_include.yml
- local: /gitlab-ci/.gitlab-ci-test-helm_include.yml
workflow:
auto_cancel:
on_new_commit: interruptible
rules:
- if: $CI_COMMIT_BRANCH
variables:
#KOJI_SCRATCH: "--scratch --skip-tag"
KOJI_SCRATCH: "--scratch"
- if: $CI_COMMIT_TAG
variables:
KOJI_SCRATCH: ""
#-------------------------------------------------------------------------------
# Prebuild
#-------------------------------------------------------------------------------
.doc-skip:
stage: .pre
script:
- |
if git diff --name-only $CI_COMMIT_BEFORE_SHA..$CI_COMMIT_SHA | grep -qv '^docs/'; then
echo "Non doc changes detected, running full pipeline"
exit 0
fi
echo "Documentation-only changes detected. Skipping pipeline."
# In an ideal world, this job failing at the top should not trigger builds,
# but we don't live in that world! In order to avoid creating more complex
# dependency graphs, we just cancel the pipeline.
curl -X POST -H "PRIVATE-TOKEN: $GITLAB_CI_TOKEN" "$CI_API_V4_URL/projects/$CI_PROJECT_ID/pipelines/$CI_PIPELINE_ID/cancel"
exit 1
allow_failure: true
rules:
- if: '$CI_PIPELINE_SOURCE == "schedule"'
when: never
- changes:
- docs/**/*
when: always
- when: never
.prebuild-template: &prebuild-template_definition
stage: prebuild
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
script:
- export DESTINATION="gitlab-registry.cern.ch/dss/eos/prebuild-${PREBUILD_NAME}-${CODENAME}"
- export DOCKERFILE="$CI_PROJECT_DIR/gitlab-ci/prebuild_OSbase/prebuild-${PREBUILD_NAME}.Dockerfile"
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n $CI_REGISTRY_USER:$CI_REGISTRY_PASSWORD | base64)\"}}}" > /kaniko/.docker/config.json
# no need yet for --build-arg PREBUILD_NAME="$PREBUILD_NAME" --build-arg CMAKE_OPTIONS="$CMAKE_OPTIONS" --build-arg CXXFLAGS="$CXXFLAGS"
- /kaniko/executor --cache="false" --destination $DESTINATION --dockerfile $DOCKERFILE --context $CI_PROJECT_DIR --build-arg=EOS_CODENAME="${CODENAME}" --compressed-caching=false --use-new-run
only:
variables:
- $PREBUILD_TRIGGER
prebuild-el8:
extends: .prebuild-template
variables:
PREBUILD_NAME: el8
prebuild-el9:
extends: .prebuild-template
variables:
PREBUILD_NAME: el9
prebuild-el10:
extends: .prebuild-template
variables:
PREBUILD_NAME: el10
prebuild-el9-arm64:
extends: .prebuild-template
variables:
PREBUILD_NAME: el9-arm64
tags:
- k8s-arm
.prebuild-el9_coverage:
extends: .prebuild-template
variables:
PREBUILD_NAME: el9_coverage
only:
variables:
- $COVERAGE_SCHEDULE
clone_docker:
stage: build:rpm
image: gitlab-registry.cern.ch/linuxsupport/alma9-base
script:
- dnf install --nogpg -y git
- git clone https://gitlab.cern.ch/eos/eos-docker.git
artifacts:
expire_in: 1 day
paths:
- eos-docker/
#-------------------------------------------------------------------------------
# Build RPMs
#-------------------------------------------------------------------------------
.build-template: &build-template_definition
stage: build:rpm
variables:
PKG_MGR: dnf
CMAKE_BIN: cmake
script:
- git submodule sync --recursive && git submodule update --init -f --recursive
- mkdir build; cd build; ${CMAKE_BIN} .. -DPACKAGEONLY=1 -DEOS_GRPC_GW=1 -Wno-dev; make srpm; cd ..;
- echo -e "[eos-depend]\nname=EOS dependencies\nbaseurl=http://storage-ci.web.cern.ch/storage-ci/eos/${CODENAME}-depend/el-$(rpm --eval '%{rhel}')/$(uname -m)/\ngpgcheck=0\nenabled=1\npriority=2\n" >> /etc/yum.repos.d/eos-depend.repo
- |
if [[ ${PKG_MGR} == "yum" ]]; then
${PKG_MGR} remove --nogpgcheck -y eos-xrootd;
${PKG_MGR}-builddep --nogpgcheck -y --setopt="cern*.exclude=xrootd*" build/SRPMS/*;
else
${PKG_MGR} builddep --nogpgcheck --allowerasing -y --setopt="cern*.exclude=xrootd*" build/SRPMS/*;
fi
- |
if [[ -n "$CI_COMMIT_TAG" ]]; then
export CCACHE_DISABLE=1;
${PKG_MGR} install -y gnupg2;
else
source gitlab-ci/setup_ccache.sh;
fi
- rpmbuild --rebuild --with server --with eos_grpc_gateway --define "_rpmdir build/RPMS/" --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" build/SRPMS/* | (ts 2> /dev/null || true; tee)
- ccache -s
- if [[ -n "$CI_COMMIT_TAG" ]]; then gpg2 --batch --import $STCI_REPO_KEY; printf "" | setsid rpmsign --define='%_gpg_name stci@cern.ch' --define='%_signature gpg' --addsign build/RPMS/*.rpm; fi
- mkdir ${BUILD_NAME}_artifacts; cp -rv build/*RPMS/ build/eos-*.tar.gz ${BUILD_NAME}_artifacts
cache:
key: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
paths:
- ccache/
artifacts:
expire_in: 60 days
paths:
- ${BUILD_NAME}_artifacts/
build_el8:
image: gitlab-registry.cern.ch/dss/eos/prebuild-el8-${CODENAME}
variables:
BUILD_NAME: el-8
extends: .build-template
only:
- schedules
- tags
build_el9:
image: gitlab-registry.cern.ch/dss/eos/prebuild-el9-${CODENAME}
variables:
BUILD_NAME: el-9
extends: .build-template
build_el10:
image: gitlab-registry.cern.ch/dss/eos/prebuild-el10-${CODENAME}
variables:
BUILD_NAME: el-10
extends: .build-template
only:
- schedules
- tags
build_el9_arm64:
image: gitlab-registry.cern.ch/dss/eos/prebuild-el9-arm64-${CODENAME}
variables:
BUILD_NAME: el-9-arm64
extends: .build-template
tags:
- k8s-arm
only:
- schedules
- tags
#-------------------------------------------------------------------------------
# EOS client builds for RHEL
#-------------------------------------------------------------------------------
.build-client-srpm-template: &build-client-srpm-template_definition
stage: build:rpm
script:
- dnf config-manager --add-repo https://linuxsoft.cern.ch/cern/rhel/$(rpm --eval '%{rhel}')/CERN/$(uname -m)/ --set-enabled
- dnf install cern-gpg-keys --nogpgcheck -y
- rpm --import /etc/pki/rpm-gpg/* || true
- dnf install rpm-build cmake gcc-c++ git -y
- git submodule sync --recursive && git submodule update --init -f --recursive
- mkdir -pv build; cd build;
- cmake ../ -DPACKAGEONLY=1 -DCLIENT=1 -Wno-dev
- make srpm; cd ..;
- mkdir -p ${CI_JOB_NAME}_artifacts
- cp -rv build/SRPMS/ ${CI_JOB_NAME}_artifacts
artifacts:
expire_in: 60 days
paths:
- ${CI_JOB_NAME}_artifacts
only:
- schedules
- tags
rh-8:
extends: .build-client-srpm-template
image: gitlab-registry.cern.ch/linuxsupport/ubi8/ubi
rh-9:
extends: .build-client-srpm-template
image: gitlab-registry.cern.ch/linuxsupport/ubi9/ubi
rh-10:
extends: .build-client-srpm-template
image: gitlab-registry.cern.ch/linuxsupport/ubi10/ubi
#-------------------------------------------------------------------------------
# Fedora builds
#-------------------------------------------------------------------------------
.build-fedora-template: &build-fedora-template_definition
stage: build:rpm
script:
- dnf install --nogpg -y gcc-c++ cmake make rpm-build which git tar dnf-plugins-core ccache rpm-sign
- git submodule sync --recursive && git submodule update --init -f --recursive
- mkdir build; cd build
- cmake .. -DPACKAGEONLY=1 -Wno-dev; make srpm; cd ..
- echo -e "[eos-depend]\nname=EOS dependencies\nbaseurl=http://storage-ci.web.cern.ch/storage-ci/eos/${CODENAME}-depend/${BUILD_NAME}/x86_64/\ngpgcheck=0\nenabled=1\nexclude=xrootd*\npriority=4\n" > /etc/yum.repos.d/eos-depend.repo
- dnf builddep --nogpgcheck --allowerasing -y build/SRPMS/*
- if [[ -n "$CI_COMMIT_TAG" ]]; then export CCACHE_DISABLE=1; else source gitlab-ci/setup_ccache_fc.sh; fi
- rpmbuild --rebuild --with server --define "_rpmdir build/RPMS/" --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" build/SRPMS/*
- ccache -s
- if [[ -n "$CI_COMMIT_TAG" ]]; then gpg2 --batch --import $STCI_REPO_KEY; printf "" | setsid rpmsign --define='%_gpg_name stci@cern.ch' --define='%_signature gpg' --addsign build/RPMS/*.rpm; fi
- mkdir ${BUILD_NAME}_artifacts; cp -R build/SRPMS build/RPMS ${BUILD_NAME}_artifacts
cache:
key: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
paths:
- ccache/
artifacts:
expire_in: 60 days
paths:
- ${BUILD_NAME}_artifacts/
allow_failure: true
build_fedora_38:
extends: .build-fedora-template
image: registry.fedoraproject.org/fedora:38
variables:
BUILD_NAME: fc-38
only:
- schedules
- tags
build_fedora_rawhide:
extends: .build-fedora-template
image: registry.fedoraproject.org/fedora:rawhide
variables:
BUILD_NAME: fc-rawhide
only:
- schedules
when: manual
#-------------------------------------------------------------------------------
# Exotic builds
#-------------------------------------------------------------------------------
.build_exotic-template: &build_exotic-template_definition
stage: build:rpm
variables:
PKG_MGR: dnf
CMAKE_CMD: cmake3
script:
- export DIST=$(rpm --eval '%{rhel}')
- ${PKG_MGR} install -y git ccache tar sudo which tar gzip moreutils
- git submodule sync --recursive && git submodule update --init -f --recursive
- mkdir build; cd build
- ${CMAKE_CMD} .. -DPACKAGEONLY=1 ${CMAKE_OPTIONS} -Wno-dev
- make srpm; cd ..;
- |
if [[ "$RPMBUILD_OPTIONS" == *asan* ]]; then
echo -e "[eos-asan-depend]\nname=EOS dependencies\nbaseurl=http://storage-ci.web.cern.ch/storage-ci/eos/${CODENAME}-depend/el-${DIST}-asan/x86_64/\ngpgcheck=0\nenabled=1\npriority=2\n" > /etc/yum.repos.d/eos-depend.repo;
# Install the asan enabled dependencies
${PKG_MGR} remove -y eos-xrootd eos-folly eos-grpc eos-rocksdb || true;
elif [[ "$RPMBUILD_OPTIONS" == *tsan* ]]; then
echo -e "[eos-tsan-depend]\nname=EOS dependencies\nbaseurl=http://storage-ci.web.cern.ch/storage-ci/eos/${CODENAME}-depend/el-${DIST}-tsan/x86_64/\ngpgcheck=0\nenabled=1\npriority=2\n" > /etc/yum.repos.d/eos-depend.repo;
# Install the tsan enabled dependencies
${PKG_MGR} remove -y eos-xrootd eos-folly eos-grpc eos-rocksdb || true;
else
echo -e "[eos-depend]\nname=EOS dependencies\nbaseurl=http://storage-ci.web.cern.ch/storage-ci/eos/${CODENAME}-depend/el-${DIST}/x86_64/\ngpgcheck=0\nenabled=1\npriority=2\n" > /etc/yum.repos.d/eos-depend.repo;
fi
- |
if [[ ${PKG_MGR} == "yum" ]]; then
${PKG_MGR}-builddep --nogpgcheck --setopt="cern*.exclude=xrootd*" -y build/SRPMS/*
else
${PKG_MGR} install -y dnf-plugins-core
${PKG_MGR} builddep --nogpgcheck --setopt="cern*.exclude=xrootd*" -y build/SRPMS/*
fi
- mkdir -p ${BUILD_NAME}_artifacts
- if [[ -n "$CI_COMMIT_TAG" ]]; then export CCACHE_DISABLE=1; else source gitlab-ci/setup_ccache.sh; fi
- rpmbuild --rebuild ${RPMBUILD_OPTIONS} --define "_rpmdir build/RPMS/" --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" build/SRPMS/* | ts
- ccache -s
- if [[ -n "$CI_COMMIT_TAG" ]]; then gpg2 --batch --import $STCI_REPO_KEY; printf "" | setsid rpmsign --define='%_gpg_name stci@cern.ch' --define='%_signature gpg' --addsign build/RPMS/*.rpm; fi
- cp -R build/SRPMS/ build/RPMS/ ${BUILD_NAME}_artifacts
cache:
key: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
paths:
- ccache/
artifacts:
expire_in: 1 day
paths:
- ${BUILD_NAME}_artifacts/
dependencies: []
allow_failure: true
build_el9_asan:
image: gitlab-registry.cern.ch/dss/eos/prebuild-el9-${CODENAME}
variables:
CMAKE_CMD: cmake
BUILD_NAME: el-9-asan
CMAKE_OPTIONS: "-DASAN=1"
RPMBUILD_OPTIONS: "--with server --with asan"
CXXFLAGS: "-Wno-parentheses" # Avoid boost header compilation errors
before_script:
- ${PKG_MGR} install -y epel-release libasan cmake gcc gcc-c++ rpmdevtools
extends: .build_exotic-template
when: manual
build_client_el9_tsan:
image: gitlab-registry.cern.ch/dss/eos/prebuild-el9-${CODENAME}
variables:
CMAKE_CMD: cmake
BUILD_NAME: el-9-tsan
CMAKE_OPTIONS: "-DTSAN=1"
RPMBUILD_OPTIONS: "--with tsan"
CXXFLAGS: "-Wno-parentheses" # Avoid boost header compilation errors
before_script:
- ${PKG_MGR} install -y epel-release libtsan cmake gcc gcc-c++ which rpmdevtools
extends: .build_exotic-template
when: manual
build_el9_clang:
image: gitlab-registry.cern.ch/dss/eos/prebuild-el9-${CODENAME}
variables:
BUILD_NAME: el-9-clang
CMAKE_OPTIONS: "-DCLANG_BUILD=1"
RPMBUILD_OPTIONS: "--with clang --with server"
extends: .build_exotic-template
only:
- schedules
- triggers
# @note Please contact CTA team / jleduc if you want to change this job
build_cc7_opt_xrootd:
image: gitlab-registry.cern.ch/dss/eos/prebuild-cc7-${CODENAME}
variables:
PKG_MGR: yum
BUILD_NAME: cc7_opt_xrootd
CMAKE_OPTIONS: "-DEOS_XROOTD=1"
RPMBUILD_OPTIONS: "--with eos_xrootd_rh"
before_script:
- sed -i "s/pgm \/usr\/bin\/xrdcp/pgm \/bin\/true/g" misc/etc/xrd.cf.fst
except:
- tags
extends: .build_exotic-template
when: manual
build_el9_coverage:
image: gitlab-registry.cern.ch/dss/eos/prebuild-el9-${CODENAME}
variables:
BUILD_NAME: el9_coverage
RPMBUILD_OPTIONS: "--with coverage"
only:
variables:
- $COVERAGE_SCHEDULE
extends: .build_exotic-template
#-------------------------------------------------------------------------------
# Build docker images
#-------------------------------------------------------------------------------
.build_dockerimage-template:
stage: build:dockerimage
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
variables:
EXTRA_TAG: ""
script:
# @note keep $CACHE orthogonal to $IMAGE_TAG, do not join the "if"s
- if [[ -n "$CI_COMMIT_TAG" ]] || [[ "x$CI_PIPELINE_SOURCE" == "xschedule" ]];
then CACHE="false";
else CACHE="true";
fi
- if [[ -n "$CI_COMMIT_TAG" ]];
then IMAGE_TAG="$CI_COMMIT_TAG${OS_TAG}${EXTRA_TAG}";
else IMAGE_TAG="$CI_COMMIT_SHORT_SHA${OS_TAG}${EXTRA_TAG}";
fi
- IMAGE_REPO="gitlab-registry.cern.ch/dss/eos/eos-ci"
- DESTINATION="${IMAGE_REPO}:${IMAGE_TAG}"
- echo "CACHE=$CACHE - DESTINATION=$DESTINATION"
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n $CI_REGISTRY_USER:$CI_REGISTRY_PASSWORD | base64)\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --cache=$CACHE --destination $DESTINATION --dockerfile $DOCKERFILE --context $CI_PROJECT_DIR --build-arg=EOS_CODENAME="${CODENAME}" --compressed-caching=false --use-new-run
retry: 1
el9_docker_image:
extends: .build_dockerimage-template
variables:
DOCKERFILE: eos-docker/Dockerfile_el9
OS_TAG: ".el9"
needs:
- job: clone_docker
- job: build_el9
el10_docker_image:
extends: .build_dockerimage-template
variables:
DOCKERFILE: eos-docker/Dockerfile_el10
OS_TAG: ".el10"
needs:
- job: clone_docker
- job: build_el10
only:
- schedules
- tags
el9_asan_docker_image:
extends: .build_dockerimage-template
variables:
DOCKERFILE: eos-docker/Dockerfile_el9_asan
EXTRA_TAG: "_asan"
OS_TAG: ".el9"
needs:
- job: clone_docker
- job: build_el9_asan
when: manual
allow_failure: true
.el9_coverage_docker_image:
extends: .build_dockerimage-template
variables:
DOCKERFILE: eos-docker/Dockerfile_coverage
EXTRA_TAG: "_coverage"
OS_TAG: ".el9"
needs:
- job: clone_docker
- job: build_el9_coverage
only:
variables:
- $COVERAGE_SCHEDULE
allow_failure: true
#-------------------------------------------------------------------------------
# Code quality, from codeclimate plugins - disabled
#-------------------------------------------------------------------------------
# .code_quality:
# artifacts:
# paths: [gl-code-quality-report.json]
# rules:
# - if: '$CI_PIPELINE_SOURCE == "schedule"'
# allow_failure: true
# .code_quality_html:
# extends: code_quality
# variables:
# REPORT_FORMAT: html
# artifacts:
# paths: [gl-code-quality-report.html]
#-------------------------------------------------------------------------------
# Dock8rnetes testing framework (exec_cmd wraps both docker and k8s!)
#-------------------------------------------------------------------------------
.dock8s_before_script_template: &dock8s_before_script_template
stage: test
before_script:
- case $CI_JOB_NAME in
"k8s"* )
source ./gitlab-ci/before_script_k8s_test.sh;
source ./gitlab-ci/utilities_func_for_tests.sh --type k8s $K8S_NAMESPACE ;;
"dock"* )
source ./gitlab-ci/before_script_docker_test.sh;
source ./gitlab-ci/utilities_func_for_tests.sh --type docker; ;;
esac
variables:
OS_TAG: ".el9"
.dock8s_after_script_template: &dock8s_after_script_template
after_script:
- case $CI_JOB_NAME in
"k8s"* )
source ./gitlab-ci/after_script_k8s_test.sh ;;
"dock"* )
source ./gitlab-ci/after_script_docker_test.sh ;;
esac
.dock8s_system_test_template:
extends:
- .dock8s_before_script_template
- .dock8s_after_script_template
script:
- date
- exec_cmd eos-mgm1 'eos ns mutex --toggleorder'
- exec_cmd eos-mgm1 'eos-instance-test-ci'
- date
- exec_cmd eos-mgm1 'eos-unit-tests-with-instance -n root://localhost//eos/dockertest/'
- exec_cmd eos-mgm1 'grep "RWMutex. Order Checking Error in thread" /var/log/eos/mgm/xrdlog.mgm && exit 1 || exit 0'
- date
- cp_to_local_cmd eos-cli1:/usr/sbin/eos-test-utils ./eos-test-utils; chmod +x eos-test-utils
- cp_to_local_cmd eos-cli1:/usr/sbin/eos-fst-close-test ./eos-fst-close-test; chmod +x eos-fst-close-test
- case $CI_JOB_NAME in
"k8s"* )
export EOS_MGM_URL="root://eos-mgm1.eos-mgm1.$K8S_NAMESPACE.svc.cluster.local";
./eos-fst-close-test --mgm ${EOS_MGM_URL} --type k8s $K8S_NAMESPACE ;;
"dock"* )
./eos-fst-close-test --type docker ;;
esac
- date
artifacts:
when: on_failure
expire_in: 3 days
paths:
- eos-logs-${CI_JOB_ID}/
.dock8s_convert_fsck_recycle_template:
extends:
- .dock8s_before_script_template
- .dock8s_after_script_template
script:
- cp_to_local_cmd eos-cli1:/usr/sbin/eos-test-utils ./eos-test-utils; chmod +x eos-test-utils
- cp_to_local_cmd eos-cli1:/usr/sbin/eos-convert-test ./eos-convert-test; chmod +x eos-convert-test
- cp_to_local_cmd eos-cli1:/usr/sbin/eos-fsck-test ./eos-fsck-test; chmod +x eos-fsck-test
- cp_to_local_cmd eos-cli1:/usr/sbin/eos-recycle-test ./eos-recycle-test; chmod +x eos-recycle-test
- case $CI_JOB_NAME in
"k8s"* )
./eos-convert-test --type k8s $K8S_NAMESPACE;
./eos-fsck-test --max-delay 600 --type k8s $K8S_NAMESPACE;
./eos-recycle-test --type k8s $K8S_NAMESPACE;;
"dock"* )
./eos-convert-test --type docker;
./eos-fsck-test --max-delay 600 --type docker;
./eos-recycle-test --type docker;;
esac
- rm -rf eos-test-utils
- rm -rf eos-convert-test
- rm -rf eos-fsck-test
- rm -rf eos-recycle-test
artifacts:
when: on_failure
expire_in: 3 days
paths:
- eos-logs-${CI_JOB_ID}/
.dock8s_rtb_clone_template:
extends:
- .dock8s_before_script_template
- .dock8s_after_script_template
script:
# prepare mountpoints
- exec_cmd eos-cli1 'atd; at now <<< "mkdir -p /eos1/ && mount -t fuse eosxd -ofsname=mount-1 /eos1/; mkdir -p /eos2/ && mount -t fuse eosxd -ofsname=mount-2 /eos2/;"'
- exec_cmd eos-cli1 'count=0; while [[ $count -le 10 ]] && ( [[ ! -d /eos1/dockertest/ ]] || [[ ! -d /eos2/dockertest/ ]] ); do echo "Wait for mount... $count"; (( count++ )); sleep 1; done;'
# download tests repo
- exec_cmd eos-cli1 'git clone https://gitlab.cern.ch/dss/eosclient-tests.git'
- exec_cmd eos-cli1 'cd /eosclient-tests && pip install -r requirements.txt'
# ubuntu releases do not support 'clone' yet, skip its test
- case $CI_JOB_NAME in
"ub_focal"* | "ub_jammy"* ) ;;
* ) exec_cmd eos-cli1 'cd /eosclient-tests; clone_tests/clone_test.sh prepare; rc=$?; exit $rc' ;;
esac
artifacts:
when: on_failure
expire_in: 3 days
paths:
- eos-logs-${CI_JOB_ID}/
.dock8s_fusex_test_template:
extends:
- .dock8s_before_script_template
- .dock8s_after_script_template
script:
# prepare mountpoints
- exec_cmd eos-cli1 'atd; at now <<< "mkdir -p /eos1/ && mount -t fuse eosxd -ofsname=mount-1 /eos1/; mkdir -p /eos2/ && mount -t fuse eosxd -ofsname=mount-2 /eos2/;"'
- exec_cmd eos-cli1 'count=0; while [[ $count -le 10 ]] && ( [[ ! -d /eos1/dockertest/ ]] || [[ ! -d /eos2/dockertest/ ]] ); do echo "Wait for mount... $count"; (( count++ )); sleep 1; done;'
# fusex functional bindings
- exec_cmd eos-cli1 'atd; at now <<< "mkdir -p /eosfunctionaltest/ && mount -t fuse eosxd -ofsname=eosdockertest /eosfunctionaltest/;"'
- exec_cmd eos-cli1 'count=0; while [[ $count -le 10 ]] && [[ ! -d /eosfunctionaltest/dockertest/ ]] ; do echo "Wait for mount... $count"; (( count++ )); sleep 1; done;'
- exec_cmd eos-cli1 'su eos-user -c "mkdir -m 700 -p /eosfunctionaltest/dockertest/credentialtest/ && cd /eosfunctionaltest/dockertest/credentialtest/"'
- exec_cmd eos-cli1 'su eos-user -c "eos-test-credential-bindings /eosfunctionaltest/dockertest/credentialtest/"'
# fusex benchmark
- exec_cmd eos-mgm1 'eos ns mutex --toggleorder'
- exec_cmd eos-cli1 'su eos-user -c "mkdir -p /eos1/dockertest/fusex_tests/ && cd /eos1/dockertest/fusex_tests/ && fusex-benchmark"'
- exec_cmd eos-mgm1 'grep "RWMutex. Order Checking Error in thread" /var/log/eos/mgm/xrdlog.mgm && exit 1 || exit 0'
# download tests repo
- exec_cmd eos-cli1 'git clone https://gitlab.cern.ch/dss/eosclient-tests.git'
- exec_cmd eos-cli1 'cd /eosclient-tests && pip install -r requirements.txt'
# run the tests
# @todo(esindril): run "all" tests in schedule mode once these are properly supported
# if [[ "$CI_PIPELINE_SOURCE" == "schedule" ]];
# then
# exec_cmd eos-mgm1 'eos vid add gateway "eos-cli1.eos-cli1.${K8S_NAMESPACE}.svc.cluster.local" unix';
# exec_cmd eos-cli1 'env EOS_FUSE_NO_ROOT_SQUASH=1 python3 /eosclient-tests/run.py --workdir="/eos1/dockertest /eos2/dockertest" ci';
# fi
# until then just run the "ci" tests
- exec_cmd eos-cli1 'cd eosclient-tests; for n in prepare/*.sh; do /bin/bash $n prepare; done'
- exec_cmd eos-cli1 'su eos-user -c "python3 /eosclient-tests/run.py --workdir=\"/eos1/dockertest /eos2/dockertest\" ci"'
- exec_cmd eos-cli1 'cd eosclient-tests; for n in prepare/*.sh; do /bin/bash $n cleanup; done'
# fusex test SAMBA gateways authentication settings
# this will run on the client pod
- exec_cmd eos-mgm1 'eos vid enable sss'
- exec_cmd eos-mgm1 'eos vid enable unix'
- CLI_POD_HOSTNAME="$(exec_cmd eos-cli1 'hostname -f')"
- echo ${CLI_POD_HOSTNAME}
- exec_cmd eos-mgm1 "eos vid add gateway ${CLI_POD_HOSTNAME} unix"
- exec_cmd eos-cli1 'eos-fusex-functional-test --samba'
artifacts:
when: on_failure
expire_in: 3 days
paths:
- eos-logs-${CI_JOB_ID}/
.dock8s_cbox_test_template:
extends:
- .dock8s_before_script_template
- .dock8s_after_script_template
script:
# enable converter and prepare eoshome folder, cernbox alike
- exec_cmd eos-mgm1 'eos convert config set status=on'
- exec_cmd eos-mgm1 './eos_create_userhome.sh eos-user'
# prepare mountpoints
- exec_cmd eos-cli1 'atd; at now <<< "mkdir -p /eos/ && mount -t fuse eosxd -ofsname=eosdockertest /eos/"'
- exec_cmd eos-cli1 'count=0; while [[ $count -le 10 ]] && ( [[ ! -d /eos/ ]] ); do echo "Wait for mount... $count"; (( count++ )); sleep 1; done;'
    # set krb5 ticket and download tests repo. @note exporting KRB5CCNAME with the 'FILE:' ccache type is a workaround; this could be made nicer.
- exec_cmd eos-cli1 'echo -e "export KRB5CCNAME=FILE:/tmp/krb5cc_$(id -u eos-user)" >> ~/.bashrc'
- exec_cmd eos-cli1 'su eos-user -c "kinit eos-user@TEST.EOS -k -t /home/eos-user/eos-user.keytab"'
- exec_cmd eos-cli1 'su eos-user -c "git clone https://gitlab.cern.ch/dss/eosclient-tests.git /eos/user/e/eos-user/eosclient-tests"'
- exec_cmd eos-cli1 'su eos-user -c "cd /eos/user/e/eos-user/eosclient-tests && pip install -r requirements.txt"'
# launch the tests
- exec_cmd eos-cli1 'su eos-user -c "cd /eos/user/e/eos-user && python3 ./eosclient-tests/run.py --workdir=/eos/user/e/eos-user ci-eosfuse_release"'
- exec_cmd eos-cli1 'su eos-user -c "cd /eos/user/e/eos-user && python3 ./eosclient-tests/run.py --workdir=/eos/user/e/eos-user regression"'
artifacts:
when: on_failure
expire_in: 3 days
paths:
- eos-logs-${CI_JOB_ID}/
.dock8s_reva_test_template:
extends:
- .dock8s_before_script_template
- .dock8s_after_script_template
script:
# enable converter and prepare eoshome folder, cernbox alike
- exec_cmd eos-mgm1 'eos convert config set status=on'
- exec_cmd eos-mgm1 './eos_create_userhome.sh eos-user'
# prepare mountpoints
- exec_cmd eos-cli1 'atd; at now <<< "mkdir -p /eos/ && mount -t fuse eosxd -ofsname=eosdockertest /eos/"'
- exec_cmd eos-cli1 'count=0; while [[ $count -le 10 ]] && ( [[ ! -d /eos/ ]] ); do echo "Wait for mount... $count"; (( count++ )); sleep 1; done;'
# install dependencies
- exec_cmd eos-mgm1 'yum -y install nodejs npm git make tar'
# Install go
- exec_cmd eos-mgm1 "export PATH=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PATH\" && /usr/bin/wget https://go.dev/dl/go1.25.1.linux-amd64.tar.gz && /usr/bin/tar -C /usr/local -xzf go1.25.1.linux-amd64.tar.gz && ln -s /usr/local/go/bin/go /usr/bin/go && ln -s /usr/local/go/bin/gofmt /usr/bin/gofmt || echo Failed to install golang"
- exec_cmd eos-mgm1 'npm install -g @intuit/judo'
# Build reva from latest tag
- exec_cmd eos-mgm1 'git clone https://github.com/cs3org/reva.git'
- |
exec_cmd eos-mgm1 'export PATH="$(/usr/bin/go env GOPATH)/bin:$PATH"; cd reva; git fetch --tags ;
export latest_tag=$(git tag --sort=-version:refname | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$" | head -1) ;
echo "Building reva tag: $latest_tag" ;
git checkout "$latest_tag"; make reva; make cernbox-revad; mkdir -p ../tmp'
# Put users.json and groups.json in the right place
- exec_cmd eos-mgm1 'cp reva/tests/integration/reva-cli/config/users.json tmp/users.json'
- exec_cmd eos-mgm1 'cp reva/tests/integration/reva-cli/config/groups.json tmp/groups.json'
# Put eos path as basepath for tests:
- |
exec_cmd eos-mgm1 \
"find reva/tests/integration/reva-cli -type f -name '*.yaml' -exec sed -i 's|^[[:space:]]*BASEDIR: \"/localfs\"$| BASEDIR: \"/eos/test\"|' {} +"
# Prepare mgm (add users, set vid mapping, create directory)
- exec_cmd eos-mgm1 "groupadd -g 123 myusers"
- exec_cmd eos-mgm1 "useradd -u 1255 -g myusers testuser"
- exec_cmd eos-mgm1 "useradd -u 1256 -g myusers testreceivinguser"
- exec_cmd eos-mgm1 "eos mkdir /eos/test"
- exec_cmd eos-mgm1 "eos chown 1255:123 /eos/test"
- exec_cmd eos-mgm1 "eos acl --sys u:1255=rwx /eos"
- exec_cmd eos-mgm1 "eos recycle config --add-bin /eos/"
- exec_cmd eos-mgm1 "eos attr set sys.versioning=\"10\" /eos/test"
- exec_cmd eos-mgm1 "eos attr set sys.forced.atomic=\"1\" /eos/test"
- exec_cmd eos-mgm1 "eos attr set sys.allow.oc.sync=\"1\" /eos/test"
- exec_cmd eos-mgm1 "eos attr ls /eos/test"
- exec_cmd eos-mgm1 "eos vid add gateway \"127.0.0.1\" grpc"
- exec_cmd eos-mgm1 "eos vid add gateway \"[:1]\" grpc"
- exec_cmd eos-mgm1 "eos vid add gateway \"[::1]\" grpc"
- exec_cmd eos-mgm1 "eos vid add gateway \"127.0.0.1\" https"
- exec_cmd eos-mgm1 "eos vid add gateway \"[:1]\" https"
- exec_cmd eos-mgm1 "eos vid add gateway \"[::1]\" https"
- exec_cmd eos-mgm1 "eos vid set map -grpc key:auth_key vuid:11 vgid:11"
- exec_cmd eos-mgm1 "eos vid set map -https key:auth_key vuid:11 vgid:11"
- exec_cmd eos-mgm1 "eos vid set membership 11 +sudo"
- exec_cmd eos-mgm1 "eos vid set membership 11 -uids 3"
- exec_cmd eos-mgm1 "eos vid set membership 11 -gids 4"
- exec_cmd eos-mgm1 "eos access allow group myusers"
- echo ${MGM_POD_HOSTNAME}
# We connect over https, and the certificate is only valid for the hostname, so replace localhost with the MGM's hostname
- exec_cmd eos-mgm1 'sed -i "s/^\(master_url = \"https:\/\/\)localhost\(:[0-9][0-9]*\"\)/\1$(hostname -f)\2/" reva/tests/integration/reva-cli/config/revad-eos.toml'
# Start revad
- exec_cmd eos-mgm1 './reva/cmd/revad/revad -c reva/tests/integration/reva-cli/config/revad-eos.toml revad.log 2>&1 & echo $! > revad.pid'
# Tests and debug output
- exec_cmd eos-mgm1 "cd reva; make test-reva-cli"
- exec_cmd eos-mgm1 "eos vid ls"
- exec_cmd eos-mgm1 "eos access ls"
- exec_cmd eos-mgm1 "cat revad.log"
artifacts:
when: on_failure
expire_in: 3 days
paths:
- eos-logs-${CI_JOB_ID}/
allow_failure: true
.dock8s_flamegraph_test_template:
extends:
- .dock8s_before_script_template
- .dock8s_after_script_template
script:
- date
- echo 0 > /proc/sys/kernel/perf_event_paranoid; cat /proc/sys/kernel/perf_event_paranoid
- echo 0 > /proc/sys/kernel/kptr_restrict; cat /proc/sys/kernel/kptr_restrict
- exec_cmd eos-mgm1 "mkdir eos-flamegraph-data; cd eos-flamegraph-data; /usr/sbin/eos-make-flamegraph"
artifacts:
    expire_in: 1 day
paths:
- eos-logs-${CI_JOB_ID}/
.unit_test_template: &unit_test_template_definition
stage: test
variables:
OS_TAG: ".el9"
script:
# generic unit tests
- eos-unit-tests
- eos-unit-tests-fst
- eos-fusex-tests
- pip3 install pytest; python3 -m pytest /usr/sbin/test-eos-iam-mapfile.py
# namespace specific unit tests
- export EOS_QUARKDB_HOSTPORT=localhost:7777
- quarkdb-create --path /var/quarkdb/node-0
- chown -R daemon:daemon /var/quarkdb/node-0
- xrootd -n qdb -c /etc/xrd.cf.quarkdb -l /var/log/eos/xrdlog.qdb -b -Rdaemon
- eos-ns-quarkdb-tests
- cp /usr/sbin/qclient-tests . && GTEST_DEATH_TEST_USE_FORK=1 ./qclient-tests
needs:
- job: el9_docker_image
artifacts: false
retry: 1
tags:
- docker_node
- dock
unit_test:tag:
extends: .unit_test_template
image:
name: gitlab-registry.cern.ch/dss/eos/eos-ci:${CI_COMMIT_TAG}${OS_TAG}
entrypoint: ["/bin/bash", "-c"]
only:
- tags
unit_test:
extends: .unit_test_template
image:
name: gitlab-registry.cern.ch/dss/eos/eos-ci:${CI_COMMIT_SHORT_SHA}${OS_TAG}
entrypoint: ["/bin/bash", "-c"]
except:
- tags
unit_test_asan:
extends: .unit_test_template
image:
name: gitlab-registry.cern.ch/dss/eos/eos-ci:${CI_COMMIT_SHORT_SHA}${OS_TAG}${EXTRA_TAG}
entrypoint: ["/bin/bash", "-c"]
variables:
LSAN_OPTIONS: "suppressions=/var/eos/test/LeakSanitizer.supp" # Suppress known memory leaks. For the generic tests
ASAN_OPTIONS: "fast_unwind_on_malloc=0" # Avoid indirect leaks from linked dependencies. For the namespace tests
EXTRA_TAG: "_asan"
needs:
- job: el9_asan_docker_image
artifacts: false
when: manual
allow_failure: true
#-------------------------------------------------------------------------------
# RPM publishing
#-------------------------------------------------------------------------------
.publish_koji_template: &publish_koji_template_definition
stage: publish
image: gitlab-registry.cern.ch/linuxsupport/rpmci/kojicli
script:
- yum install --nogpg -y sssd-client
- kinit stci@CERN.CH -k -t /stci.krb5/stci.keytab
# KOJI_SCRATCH will be set for branches and empty for tags
- koji build ${KOJI_SCRATCH} ${TARGET} ${BUILD_NAME}_artifacts/SRPMS/*.src.rpm
tags:
- docker_node
- publish
when: manual
publish_koji_al8:
<<: *publish_koji_template_definition
variables:
TARGET: "eos8al"
BUILD_NAME: "el-8"
only:
- schedules
- tags
needs:
- job: build_el8
artifacts: true
publish_koji_al9:
<<: *publish_koji_template_definition
variables:
TARGET: "eos9al"
BUILD_NAME: "el-9"
needs:
- job: build_el9
artifacts: true
publish_koji_al10:
<<: *publish_koji_template_definition
variables:
TARGET: "eos10al"
BUILD_NAME: "el-10"
only:
- schedules
- tags
needs:
- job: build_el10
artifacts: true
publish_koji_rh-8:
<<: *publish_koji_template_definition
variables:
TARGET: "eos8el"
BUILD_NAME: "rh-8"
only:
- schedules
- tags
needs:
- job: rh-8
publish_koji_rh-9:
<<: *publish_koji_template_definition
variables:
TARGET: "eos9el"
BUILD_NAME: "rh-9"
only:
- schedules
- tags
needs:
- job: rh-9
publish_koji_rh-10:
<<: *publish_koji_template_definition
variables:
TARGET: "eos10el"
BUILD_NAME: "rh-10"
only:
- schedules
- tags
needs:
- job: rh-10
email_notification:
stage: publish
image: gitlab-registry.cern.ch/linuxsupport/alma9-base
variables:
ENV: production
TO_ADDRS: project-eos-commits@cern.ch,lxbatch-experts@cern.ch
script:
- dnf install -y git python pip
- git clone https://token:$EOS_REPO_MAILSERVICE_TOKEN@gitlab.cern.ch/eos/eos-mailservices-code-samples.git
- cd eos-mailservices-code-samples/Python/oauth2-samples; pip install --no-input -r requirements.txt
- export EOS_VERSION=$CI_COMMIT_TAG
- python -m oauth2_smtp
needs:
- job: publish_koji_al9
only:
- tags
# This job uses CI_JOB_TOKEN to trigger a pipeline in the CTA project.
# This means that whoever triggers this job, must have the rights to start a pipeline in CTA.
notify_cta_project:
stage: publish
image: gitlab-registry.cern.ch/linuxsupport/alma9-base
needs:
- job: el9_docker_image
artifacts: false
variables:
CTA_BRANCH: main
script:
- dnf install --nogpg -y curl gawk jq
- CTA_PROJECT_ID="139306"
- CTA_PROJECT_API="https://gitlab.cern.ch/api/v4/projects/$CTA_PROJECT_ID"
- TRIGGER_URL="$CTA_PROJECT_API/trigger/pipeline"
# Construct EOS versions
- |
if [[ -z "$CI_COMMIT_TAG" ]]; then
EOS_IMAGE_TAG="$CI_COMMIT_SHORT_SHA.el9";
else
EOS_IMAGE_TAG="$CI_COMMIT_TAG.el9";
EOS_VERSION=$(dnf -q --repofrompath=temprepo,https://storage-ci.web.cern.ch/storage-ci/eos/diopside/tag/testing/el-9/x86_64/ --repo=temprepo repoquery --qf "%{version}-%{release}\n" eos-client | grep "${CI_COMMIT_TAG}" | tail -1);
if [[ -z ${EOS_VERSION} ]]; then
echo "ERROR: Could not find EOS RPMs for ${CI_COMMIT_TAG} in storage-ci.web.cern.ch/storage-ci/eos/diopside/tag/testing/el-9/x86_64/"
exit 1
fi
fi
# Extract XRootD version from eos.spec.in
- xrd_min=$(grep '^%define xrootd_version_min' eos.spec.in | awk '{print $3}')
- XROOTD_VERSION=$(dnf -q --repofrompath=temprepo,https://xrootd.web.cern.ch/repo/testing/el9/x86_64/ --repo=temprepo repoquery --qf "%{epoch}:%{version}-%{release}\n" xrootd | grep "${xrd_min}" | tail -1)
# Get the latest stable CTA version
- LATEST_CTA_VERSION=$(dnf -q --repofrompath=temprepo,https://cta-public-repo.web.cern.ch/stable/cta-5/el9/cta/x86_64/ --repo=temprepo repoquery --latest-limit=1 --qf "%{version}-%{release}" cta-taped)
# Against an existing CTA tag, we can only trigger a pipeline where a different EOS image is used
- echo "Triggering pipeline against CTA version $LATEST_CTA_VERSION with EOS image tag $EOS_IMAGE_TAG"
- curl -X POST
-F token=$CI_JOB_TOKEN
-F ref=$CTA_BRANCH
-F "variables[CUSTOM_EOS_IMAGE_TAG]=$EOS_IMAGE_TAG"
-F "variables[CUSTOM_CTA_VERSION]=$LATEST_CTA_VERSION"
-F "variables[PIPELINE_TYPE]=REGR_AGAINST_CTA_VERSION"
$TRIGGER_URL
# Against the CTA main branch, we can also test the client EOS and XRootD versions if we are on a (EOS) tag
# If we are not on a (EOS) tag, the EOS RPMs are not accessible by the CTA pipeline, so we only test against the image
- |
if [[ -z "$CI_COMMIT_TAG" ]]; then
echo "Triggering pipeline against CTA main branch"
echo "Using EOS image tag $EOS_IMAGE_TAG and XRootD version $XROOTD_VERSION"
curl -X POST \
-F token=$CI_JOB_TOKEN \
-F ref=$CTA_BRANCH \
-F "variables[CUSTOM_EOS_IMAGE_TAG]=$EOS_IMAGE_TAG" \
-F "variables[CUSTOM_XROOTD_VERSION]=$XROOTD_VERSION" \
-F "variables[PIPELINE_TYPE]=REGR_AGAINST_CTA_MAIN" \
$TRIGGER_URL
else
echo "Triggering pipeline against CTA main branch"
echo "Using EOS image tag $EOS_IMAGE_TAG, EOS client version $EOS_VERSION and XRootD version $XROOTD_VERSION"
curl -X POST \
-F token=$CI_JOB_TOKEN \
-F ref=$CTA_BRANCH \
-F "variables[CUSTOM_EOS_IMAGE_TAG]=$EOS_IMAGE_TAG" \
-F "variables[CUSTOM_EOS_VERSION]=$EOS_VERSION" \
-F "variables[CUSTOM_XROOTD_VERSION]=$XROOTD_VERSION" \
-F "variables[PIPELINE_TYPE]=REGR_AGAINST_CTA_MAIN" \
$TRIGGER_URL
fi
rules:
- if: '$CI_COMMIT_TAG'
when: on_success
- when: manual
allow_failure: true
rpm_commit_artifacts:
stage: publish
image: gitlab-registry.cern.ch/linuxsupport/alma9-base
needs:
- job: build_el8
artifacts: true
optional: true
- job: build_el9
artifacts: true
- job: build_el10
artifacts: true
optional: true
- job: build_el9_arm64
artifacts: true
optional: true
- job: build_fedora_38
artifacts: true
optional: true
script:
- dnf install --nogpg -y sudo sssd-client createrepo
- if [[ -n "$CI_COMMIT_TAG" ]]; then echo "This only works for commits"; exit 0; else BUILD_TYPE="commit"; fi
- sudo -u stci -H ./gitlab-ci/store_artifacts.sh ${CODENAME} ${BUILD_TYPE} /eos/project/s/storage-ci/www/eos
tags:
- docker_node
- publish
except:
- tags
allow_failure: true
when: manual
rpm_testing_artifacts:
stage: publish
image: gitlab-registry.cern.ch/linuxsupport/alma9-base
script:
- dnf install --nogpg -y sudo sssd-client createrepo
- if [[ -n "$CI_COMMIT_TAG" ]]; then BUILD_TYPE="tag/testing"; else BUILD_TYPE="commit"; fi
- sudo -u stci -H ./gitlab-ci/store_artifacts.sh ${CODENAME} ${BUILD_TYPE} /eos/project/s/storage-ci/www/eos
tags:
- docker_node
- publish
only:
- master
- tags
retry: 1
rpm_stable_artifacts:
stage: publish
image: gitlab-registry.cern.ch/linuxsupport/alma9-base
script:
- dnf install --nogpg -y sudo sssd-client createrepo
- if [[ -n "$CI_COMMIT_TAG" ]]; then BUILD_TYPE="tag"; else echo "This only works for tags"; exit 0; fi
- ./gitlab-ci/store_artifacts.sh ${CODENAME} ${BUILD_TYPE} /mnt/eos_repositories/eos
- sudo -u stci -H ./gitlab-ci/store_stable_artifacts.sh ${CODENAME} /eos/project/s/storage-ci/www/eos ${CI_COMMIT_TAG}
- echo ${CI_COMMIT_TAG} | sudo -u stci tee /eos/project/s/storage-ci/www/eos/${CODENAME}/tag/latest_version
tags:
- docker_node
- publish
only:
- tags
dependencies: []
when: manual
#to be run after the rpm publish
.publish_dockerimage-template:
stage: publish
image:
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
script:
- if [[ -n "$CI_COMMIT_TAG" ]]; then
export REPOBRANCH="tag-testing";
export DESTINATION="${IMAGE_REPO}:${CI_COMMIT_TAG}${OS_TAG}";
else
export REPOBRANCH="commit";
export DESTINATION="${IMAGE_REPO}:${CI_COMMIT_SHORT_SHA}${OS_TAG}";
fi
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n $CI_REGISTRY_USER:$CI_REGISTRY_PASSWORD | base64)\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --cache=false --destination $DESTINATION --dockerfile $DOCKERFILE --context $CI_PROJECT_DIR --build-arg=EOS_CODENAME="${CODENAME}" --build-arg=REPOBRANCH="${REPOBRANCH}" --compressed-caching=false --use-new-run
retry: 1
el9_publish_dockerimage_all:
extends: .publish_dockerimage-template
variables:
DOCKERFILE: eos-docker/minimal/el9_minimal.Dockerfile
IMAGE_REPO: "gitlab-registry.cern.ch/dss/eos/eos-all"
OS_TAG: ".el9"
needs:
- job: clone_docker
- job: build_el9
allow_failure: true
when: manual
el9_publish_dockerimage_fusex:
extends: .publish_dockerimage-template
variables:
DOCKERFILE: eos-docker/minimal/el9_minimal.fusex-only.Dockerfile
IMAGE_REPO: "gitlab-registry.cern.ch/dss/eos/eos-fusex"
OS_TAG: ".el9"
needs:
- job: clone_docker
- job: build_el9
allow_failure: true
when: manual
#-------------------------------------------------------------------------------
# RPM cleaning
#-------------------------------------------------------------------------------
clean_rpm_artifacts:
stage: clean
image: gitlab-registry.cern.ch/linuxsupport/alma9-base
script:
- dnf install --nogpg -y sssd-client sudo createrepo
- sudo -u stci -H ./gitlab-ci/remove_old_artifacts.sh
allow_failure: true
only:
- triggers
- schedules
tags:
- docker_node
# get all the namespaces, filter out the "mgmt" ones, delete if older than 30h
clean_k8s_cluster:
stage: clean
image: alpine/k8s:1.18.2
script:
- export KUBECONFIG=$K8S_CONFIG
- set +o pipefail
- kubectl get namespaces --no-headers | grep -v 'default\|kube-node-lease\|kube-public\|kube-system\|magnum-tiller' |
awk 'match($3,/(([3-9][0-9]|[1-9][0-9][0-9]+)h|[1-9][0-9]*d)/) {print $1}' | xargs --no-run-if-empty kubectl delete namespaces
dependencies: []
allow_failure: true
only:
- schedules
tags:
- docker_node
- k8s
# @todo cleanup helm leftover for failed / hanging tests. May be merged to 'clean_k8s_cluster'
clean_helm_cluster:
stage: clean
image: gitlab-registry.cern.ch/dss/alpine-enhanced:3.13.5
script:
- export KUBECONFIG=$K8S_CONFIG
- echo "Please, implement me!"
dependencies: []
allow_failure: true
only:
- schedules
#-------------------------------------------------------------------------------
# Manually triggered builds
#-------------------------------------------------------------------------------
.eos_nginx_build_template:
stage: build:manual
variables:
PKG_MGR: dnf
script:
- ${PKG_MGR} install --nogpg -y gcc-c++ cmake make rpm-build which git sudo yum-utils createrepo sssd-client
- cd nginx
- ./makesrpm.sh
- |
if [[ ${PKG_MGR} == "yum" ]]; then
${PKG_MGR}-builddep -y --nogpgcheck *.src.rpm
else
${PKG_MGR} install -y dnf-plugins-core
${PKG_MGR} builddep -y --nogpgcheck *.src.rpm
fi
- mkdir RPMS
- rpmbuild --rebuild --define "_rpmdir RPMS/" --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" *.src.rpm
- STORAGE_PATH=/eos/project/s/storage-ci/www/eos/${CODENAME}-depend/${BUILD_NAME}/x86_64
- sudo -u stci -H mkdir -p $STORAGE_PATH
- sudo -u stci -H cp -f RPMS/*.rpm $STORAGE_PATH
- sudo -u stci -H createrepo --update -q $STORAGE_PATH
tags:
- docker_node
when: manual
eos_nginx_el-8:
extends: .eos_nginx_build_template
image: gitlab-registry.cern.ch/linuxsupport/alma8-base
variables:
BUILD_NAME: el-8
eos_nginx_el-9:
extends: .eos_nginx_build_template
image: gitlab-registry.cern.ch/linuxsupport/alma9-base
variables:
BUILD_NAME: el-9
#-------------------------------------------------------------------------------
# ALICE ApMon builds
#-------------------------------------------------------------------------------
.build-apmon-template: &build-apmon-template-definition
stage: build:manual
variables:
PKG_MGR: dnf
script:
- ${PKG_MGR} install --nogpg -y gcc-c++ make rpm-build which git sssd-client sudo createrepo rsync tar gawk
- cd ApMon; ./maketar.sh
- rpmbuild --define "_source_filedigest_algorithm md5" --define "_binary_filedigest_algorithm md5" --define "_topdir ./rpmbuild" -ts eos-apmon-*.tar.gz
- |
if [[ ${PKG_MGR} == "yum" ]]; then
${PKG_MGR}-builddep -y --nogpgcheck rpmbuild/SRPMS/eos-apmon-*.src.rpm
else
${PKG_MGR} install -y dnf-plugins-core
${PKG_MGR} builddep -y --nogpgcheck rpmbuild/SRPMS/eos-apmon-*.src.rpm
fi
- rpmbuild --rebuild --define "_rpmdir rpmbuild/RPMS/" --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" rpmbuild/SRPMS/eos-apmon-*.src.rpm
- sudo -u stci -H mkdir -p ${STORAGE_PATH}
- sudo -u stci -H cp -f rpmbuild/RPMS/*.rpm ${STORAGE_PATH}
- sudo -u stci -H createrepo --update -q ${STORAGE_PATH}
tags:
- docker_node
when: manual
eos_apmon_el-8:
image: gitlab-registry.cern.ch/linuxsupport/alma8-base
variables:
STORAGE_PATH: /eos/project/s/storage-ci/www/eos/${CODENAME}-depend/el-8/x86_64
extends: .build-apmon-template
eos_apmon_el-9:
image: gitlab-registry.cern.ch/linuxsupport/alma9-base
variables:
STORAGE_PATH: /eos/project/s/storage-ci/www/eos/${CODENAME}-depend/el-9/x86_64
extends: .build-apmon-template
eos_docs:
stage: build:manual
image: gitlab-registry.cern.ch/linuxsupport/alma9-base
script:
- yum install --nogpg -y make python3-sphinx sssd-client sudo which git
- cd doc
- export PYTHONPATH=`pwd`/_themes/
- cd diopside
- make html
- make html
- sudo kinit stci@CERN.CH -k -t /stci.krb5/stci.keytab
- sudo -u stci -H rm -rf /eos/project/e/eos/www/docs/diopside/*
- sudo -u stci -H cp -R _build/html/* /eos/project/e/eos/www/docs/diopside
tags:
- docker_node
rules:
- if: '$CI_COMMIT_TAG'
when: on_success
allow_failure: true
- when: manual
allow_failure: true
.eos_repopackage:
stage: build:manual
image: gitlab-registry.cern.ch/linuxsupport/cc7-base
script:
- yum install --nogpg -y rpm-build sssd-client sudo createrepo
- mkdir build
- cd build
- rpmbuild --bb --define "_rpmdir RPMS/" --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" ../elrepopackage.spec
- STORAGE_PATH=/eos/project/s/storage-ci/www/eos/${CODENAME}/tag/el-7/x86_64
- sudo -u stci -H mkdir -p $STORAGE_PATH
- sudo -u stci -H cp -f RPMS/*.rpm $STORAGE_PATH
- sudo -u stci -H createrepo --update -q $STORAGE_PATH
tags:
- docker_node
when: manual
pre_commit:
stage: pre-commit
image: gitlab-registry.cern.ch/linuxsupport/alma10-base
needs: [ ]
variables:
PIP_NO_CACHE_DIR: "1"
before_script:
- dnf install -y git python python-pip clang-tools-extra
- python -m pip install --upgrade pip
- pip install pre-commit
script:
# Run pre-commit against all files starting from a given commit to HEAD
# This is done to avoid issues with large diffs when running pre-commit on all files (blocks the CI for ~10 minutes)
    # When (if) the whole codebase is formatted, we can remove the `--from-ref` and `--to-ref` options to always check all files
# pre-commit is run but clang-format will not be applied to all files, just to changed lines.
# In this stage it will not run since there are no changes, so clang-format here will do nothing
- pre-commit run --from-ref $(git merge-base origin/master HEAD) --to-ref HEAD --all-files
# Now we compute the diff and run clang-format on the diff only, as if we had the hook installed during our commits.
    # This modifies the files in place. You can copy-paste this command to fix formatting issues locally and then commit the changes — though if you have the pre-commit hook installed, you should never need to.
- git diff -U0 $(git merge-base origin/master HEAD) HEAD | python3 utils/clang-format-diff.py -p1 -i
# check if the previous command made any changes, if so, fail the job to enforce formatting
- git diff --exit-code || (echo "Code is not properly formatted, please run the above command to fix the formatting issues and commit the changes." && exit 1)
allow_failure: true # we could enable this soon but let's keep it optional for now until people have had time to adapt to the new formatting rules
================================================
FILE: .gitmodules
================================================
[submodule "namespace/ns_quarkdb/qclient"]
path = namespace/ns_quarkdb/qclient
url = https://gitlab.cern.ch/eos/qclient.git
[submodule "mgm/cta_interface"]
path = mgm/cta_interface
url = https://gitlab.cern.ch/eos/xrootd-ssi-protobuf-interface.git
[submodule "common/backward-cpp"]
path = common/backward-cpp
url = https://github.com/bombela/backward-cpp.git
branch = master
[submodule "common/xrootd-ssi-protobuf-interface"]
path = common/xrootd-ssi-protobuf-interface
url = https://:@gitlab.cern.ch:8443/eos/xrootd-ssi-protobuf-interface.git
[submodule "unit_tests/googletest"]
path = unit_tests/googletest
url = https://github.com/google/googletest
[submodule "common/grpc-proto"]
path = common/grpc-proto
url = https://:@gitlab.cern.ch:8443/eos/grpc-proto.git
[submodule "common/jwt-cpp"]
path = common/jwt-cpp
url = https://github.com/Thalhammer/jwt-cpp.git
[submodule "quarkdb"]
path = quarkdb
url = https://gitlab.cern.ch/eos/quarkdb.git
[submodule "test/microbenchmarks/benchmark"]
path = test/microbenchmarks/benchmark
url = https://github.com/google/benchmark
[submodule "common/cppzmq"]
path = common/cppzmq
url = https://github.com/zeromq/cppzmq.git
[submodule "proto/eos-protobuf-spec"]
path = proto/eos-protobuf-spec
url = https://gitlab.cern.ch/eos/eos-protobuf-spec.git
[submodule "fst/css_plugin"]
path = fst/css_plugin
url = https://gitlab.cern.ch/eos/css_plugin.git
[submodule "console/parser"]
path = console/parser
url = https://github.com/CLIUtils/CLI11.git
================================================
FILE: .ignore
================================================
unit_tests/googletest/
common/fmt/
namespace/ns_quarkdb/qclient/src/fmt/
common/sqlite/
man/man1/
.vscode
================================================
FILE: .mailmap
================================================
Abhishek Lekshmanan
Abhishek Lekshmanan
Andrea Manzi
Andreas Joachim Peters
Andreas Joachim Peters
Andreas Joachim Peters
Andreas Joachim Peters
Andreas Joachim Peters
Andreas Joachim Peters
Andreas Joachim Peters
Andreas Joachim Peters
Andreas Joachim Peters
Andreas Stoeve
Andreas Stoeve
Andreas Stoeve
Andreas Stoeve
Andreas Stoeve
Andreas Stoeve
Andreea Prigoreanu
Branko Blagojevic
Cristian Contescu
Crystal Chua
Elvin Alin Sindrilaru
Elvin Alin Sindrilaru
Elvin Alin Sindrilaru
Elvin Alin Sindrilaru
Geoffray Adde
Geoffray Adde
Geoffray Adde
Geoffray Adde
Herve Rousseau
Herve Rousseau
Jaroslav Guenther
Jaroslav Guenther
Jaroslav Guenther
Jozsef Makai
Jozsef Makai
Konstantinos Tsitsimpikos
Konstantinos Tsitsimpikos
Lukasz Janyst
Manuel Reis
Manuel Reis
Michal Kamil Simon
Mr Jenkins
Paul Lensing
Paul Lensing
Paul Lensing
Paul Lensing
Steven Murray
Unknown
Unknown
Unknown
Unknown
Unknown
Unknown
Unknown
Unknown
Unknown
Unknown
Unknown
Unknown
================================================
FILE: .pre-commit-config.yaml
================================================
fail_fast: false
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
hooks:
- id: check-added-large-files
- id: check-case-conflict
- id: check-merge-conflict
- id: check-symlinks
- id: check-yaml
- id: debug-statements
- id: end-of-file-fixer
- id: mixed-line-ending
- id: requirements-txt-fixer
- id: trailing-whitespace
# # Disable for now and use clang-format-diff.py instead to only format changed lines.
# - repo: https://github.com/pocc/pre-commit-hooks
# rev: v1.3.5
# hooks:
# - id: clang-format
# name: clang-format
# types_or: [ c, c++ ]
# args:
# - -i
# stages: [ manual ]
- repo: local
hooks:
- id: clang-format-diff
name: clang-format-diff
# We pipe 'git diff' to the python script.
# -p1 strips the a/ b/ prefixes from the diff so the script finds the files.
# -i applies the edits in-place.
entry: bash -c 'git diff -U0 --no-color --cached | python3 utils/clang-format-diff.py -p1 -i'
language: system
types_or: [ c, c++ ]
pass_filenames: false
================================================
FILE: AUDIT.md
================================================
## EOS Audit Logging
### Overview
EOS implements structured audit logging for successful operations that modify the namespace or file metadata. Audit entries are encoded as JSON (one record per line), written directly into ZSTD-compressed log segments, and rotated every 1 hour by default. A symlink `audit.zstd` always points to the current active segment.
This document explains what is logged, the record format, where files are written, rotation behavior, how to parse the logs, and where audit hooks are integrated in the codebase.
### Scope: What gets logged
- **Successful namespace-affecting operations by identified users**:
- **Files**: CREATE, DELETE, RENAME/MOVE, TRUNCATE, WRITE (commit), UPDATE (open for write without create/truncate)
- **Directories**: MKDIR, RMDIR, RENAME/MOVE
- **Symlinks**: SYMLINK creation, DELETE
- **Metadata**: CHMOD, CHOWN, SET_XATTR, RM_XATTR, SET_ACL
- **Optional**: READ and LIST can be enabled later (not default; high volume).
- **Excluded**: Failed attempts, internal non-human activities (e.g. purge/version housekeeping).
### Record format (protobuf → JSON)
Each audit line is a JSON serialization of the `eos.audit.AuditRecord` protobuf (`proto/Audit.proto`). Key elements:
- **Common fields**
- `timestamp` (int64): seconds since epoch (server time)
- `path` (string): absolute path to object; directory paths end with '/'
- `operation` (enum): one of CREATE, DELETE, RENAME, WRITE, TRUNCATE, SET_XATTR, RM_XATTR, SET_ACL, CHMOD, CHOWN, MKDIR, RMDIR, SYMLINK, UPDATE
- `client_ip` (string), `account` (string)
- `auth` (mechanism string + attributes map)
- `authorization` (reasons[])
- `trace_id` (string): server trace id
- `target` (string): for rename/symlink target path
- `uuid` (string): client/session id (empty if placeholder `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`)
- `tid` (string): client trace identifier
- `app` (string): client application
- `svc` (string): emitting service (e.g. "mgm")
- **State snapshots**
- `before` / `after` (Stat): include `ctime`, `mtime`, `uid`, `gid`, `mode` (uint32), `mode_octal` (string), `size` (uint64), `checksum` (hex string for files)
- `attrs` (repeated AttrChange): `{ name, before, after }` for xattr changes (non-system attributes)
- **Nanosecond resolution times**
- `Stat.ctime_ns` and `Stat.mtime_ns` provide full-resolution strings in the form `seconds.nanoseconds` (e.g. `1730985600.123456789`).
- **Source and version metadata**
- `src_file`, `src_line`: source file and line where the audit call originated
- `version`: software version used when emitting the record
Example JSON line (pretty-printed for readability):
```json
{
"timestamp": 1730985600,
"path": "/eos/user/a/alice/data/file.txt",
"operation": "WRITE",
"client_ip": "192.0.2.10",
"account": "alice",
"auth": { "mechanism": "krb5", "attributes": {"principal": "alice@EXAMPLE.ORG"} },
"authorization": { "reasons": ["uid-match"] },
"trace_id": "srv-abc123",
"uuid": "550e8400-e29b-41d4-a716-446655440000",
"tid": "cli-xyz789",
"app": "eoscp",
"svc": "mgm",
"before": { "ctime": 1730980000, "mtime": 1730981000, "uid": 1000, "gid": 1000, "mode": 420, "mode_octal": "0100644", "size": 1024, "checksum": "a1b2..." },
"after": { "ctime": 1730980000, "mtime": 1730985600, "ctime_ns": "1730980000.000000000", "mtime_ns": "1730985600.123456789", "uid": 1000, "gid": 1000, "mode": 420, "mode_octal": "0100644", "size": 4096, "checksum": "dead..." },
"src_file": "mgm/FuseServer/Server.cc",
"src_line": 2600,
"version": ""
}
```
### Log files, rotation, and location
- **Location**: `<logdir>/audit/` where `<logdir>` is derived from `XRDLOGDIR` (see `mgm/XrdMgmOfsConfigure.cc`).
- Directory is created on startup if missing; mode 0755; owned appropriately by the service user.
- **Active segment symlink**: `<logdir>/audit/audit.zstd` points to the current segment file.
- **Segments**: Files are ZSTD-compressed; rotated every 1 hour by default.
  - Override the rotation interval via environment variable: `EOS_AUDIT_ROTATION=<seconds>`
- Filenames include seconds for uniqueness: `audit-YYYYMMDD-HHMMSS.zst`
- On rotation, the symlink is atomically updated to the new segment.
### ZSTD stream and flushing
- On opening a new segment, the ZSTD frame header is flushed immediately to avoid `zstdcat` errors on empty files.
- Each record is written and flushed so small bursts are visible promptly.
### Implementation details
- `common/Audit.hh`, `common/Audit.cc` implement the audit writer:
- Thread-safe writer with internal locking
- Base directory configurable via `setBaseDirectory` or during construction
- `audit(const AuditRecord&)` and a convenience overload to populate from `VirtualIdentity`, operation, path, etc.
- Automatic rotation based on time; symlink management (`audit.zstd`)
- Normalizes placeholder UUID to empty string
### READ and LIST auditing (optional)
- **Disabled by default.** Enable only when needed due to potential volume.
- **Enabling via API** (on `eos::common::Audit`):
- `setReadAuditing(true|false)` — enable/disable READ auditing
- `setListAuditing(true|false)` — enable/disable directory LIST auditing
- **Suffix filter for READ auditing**:
- By default, READ auditing applies to common document-style files: `txt, pdf, doc, docx, ppt, pptx, xls, xlsx, odt, ods, odp, rtf, csv, json, xml, yaml, yml, md, html, htm`.
- Configure at runtime with `setReadAuditSuffixes({"pdf","docx",...})`.
- If the vector contains `"*"`, all files are audited for READ (equivalent to `setReadAuditAll(true)`).
- Matching is case-insensitive and based on the file extension of the path being opened.
- **Where READ/LIST audits are emitted**:
- READ: in `mgm/XrdMgmOfsFile.cc::open` for successful read-only opens (including 0-size files served by MGM) when enabled and suffix matches.
- LIST: in `mgm/XrdMgmOfsDirectory.cc::_open` on successful directory opens when enabled.
### Default settings in XrdMgmOfs
- The MGM reads environment variables at startup and applies them to the `Audit` instance:
- Default mode (`EOS_MGM_AUDIT` unset or `default`):
- Audit all modifications (CREATE, DELETE, RENAME, TRUNCATE, WRITE, UPDATE, metadata changes)
- Audit READ for the default document-style suffix list
- Do not audit LIST
### Per-directory attribute-based auditing (sys.audit)
- When `EOS_MGM_AUDIT=attribute`, global auditing is disabled and auditing is enabled per directory via the extended attribute `sys.audit` set on the parent directory (for files) or the directory itself (for LIST).
- Valid values for `sys.audit` (case-insensitive):
- `none` / `no` / `false` / `off`: disable auditing for that directory
- `modifications`: enable modifications only (CREATE/DELETE/RENAME/TRUNCATE/WRITE/UPDATE/metadata)
- `default`: enable modifications and READ filtered by the default suffix list; LIST remains off
- `detail`: enable modifications and READ for all files; LIST remains off
- `all`: enable everything including LIST and READ for all files
- Evaluation points:
- Files: parent directory’s `sys.audit`
- LIST: the directory’s own `sys.audit`
- Notes:
- `EOS_MGM_AUDIT=off` disables auditing completely; `sys.audit` is ignored.
- In non-`attribute` modes, global settings control auditing; `sys.audit` is not used to override them.
### Environment configuration
- `EOS_MGM_AUDIT` — control overall audit level (parsed in `XrdMgmOfs` and applied during configure):
- `none`, `false`, `no`, `off`, or empty: disable all auditing
- `default`: audit modifications and READ for default document suffixes (no LIST)
- `modifications`: audit only modifications (no LIST, no READ)
- `detail`: audit modifications and READ for all files (no LIST)
- `all`: audit everything, including LIST and READ for all files
- `attribute`: create the audit logger but disable all global auditing; auditing is enabled explicitly via `sys.audit`
- `EOS_MGM_AUDIT_READ_SUFFIX` — override the READ suffix filter:
- Comma-separated list, case-insensitive (e.g. `pdf,docx,json`)
- Use `*` to audit READ for all files
- If unset, the built-in default document-style list is used
Notes:
- Variables are parsed in `XrdMgmOfs` constructor and applied after the `Audit` instance is created in `XrdMgmOfsConfigure.cc`.
- Setting `EOS_MGM_AUDIT=attribute` keeps the logger active while relying solely on per-directory `sys.audit` to enable auditing.
- Setting `EOS_MGM_AUDIT=off` disables the logger entirely (no auditing).
### Integration points (where audits are emitted)
- Core MGM (`mgm/`):
  - `XrdMgmOfs.hh`: `std::unique_ptr<eos::common::Audit> mAudit` member
  - `XrdMgmOfsConfigure.cc`: initializes `mAudit` with `<logdir>/audit/`
- Operations:
- Files: `XrdMgmOfsFile.cc::open` (CREATE, TRUNCATE, UPDATE, READ), `fsctl/Commit.cc` (WRITE)
- Directories: `Mkdir.cc` (MKDIR), `Remdir.cc` (RMDIR), `XrdMgmOfsDirectory.cc` (LIST)
- Metadata: `Chmod.cc` (CHMOD), `Chown.cc` (CHOWN), `Attr.cc` (SET_XATTR, RM_XATTR)
- Symlinks: `Link.cc` (SYMLINK)
- Delete: `Rm.cc` (DELETE)
- FUSE server (`mgm/FuseServer/Server.cc`):
- Directories: `OpSetDirectory` (MKDIR, UPDATE/RENAME/MOVE; xattr changes), `OpDeleteDirectory` (RMDIR)
- Files: `OpSetFile` (CREATE, UPDATE, RENAME/MOVE; CHMOD/CHOWN detection; xattr changes), `OpDeleteFile` (DELETE)
- Symlinks: `OpSetLink` (SYMLINK), `OpDeleteLink` (DELETE)
### Directory path convention
- Directory paths in audit entries include a trailing slash `/` for unambiguous parsing.
### Mode representation
- `mode` is stored as an integer (uint32) and `mode_octal` as a string in octal for convenience.
### Parsing and tooling
- Stream current audit records:
```bash
zstdcat <logdir>/audit/audit.zstd | jq '.'
```
- Follow audit logs across rotations (like `tail -F`):
```bash
zstdtail <logdir>/audit/audit.zstd
# Or with filtering:
zstdtail <logdir>/audit/audit.zstd -- jq 'select(.operation == "DELETE")'
```
- Historical segments are named `audit-YYYYMMDD-HHMMSS.zst`. Each line is a standalone JSON record; consumers can ingest line-by-line.
### Testing and performance
- Unit tests: `unit_tests/common/AuditTests.cc`
- Rotation and symlink behavior
- Benchmark: writes 100,000 records and measures elapsed time
### Notes and caveats
- Only successful operations are logged.
- READ/LIST are intentionally omitted by default due to volume; can be added later.
- The audit writer flushes after each record for operational visibility; adjust if batching is later desired.
================================================
FILE: ApMon/AUTHORS
================================================
================================================
FILE: ApMon/COPYING
================================================
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Library General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
Copyright (C)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Library General
Public License instead of this License.
================================================
FILE: ApMon/ChangeLog
================================================
================================================
FILE: ApMon/INSTALL
================================================
Installation Instructions
*************************
Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004 Free
Software Foundation, Inc.
This file is free documentation; the Free Software Foundation gives
unlimited permission to copy, distribute and modify it.
Basic Installation
==================
These are generic installation instructions.
The `configure' shell script attempts to guess correct values for
various system-dependent variables used during compilation. It uses
those values to create a `Makefile' in each directory of the package.
It may also create one or more `.h' files containing system-dependent
definitions. Finally, it creates a shell script `config.status' that
you can run in the future to recreate the current configuration, and a
file `config.log' containing compiler output (useful mainly for
debugging `configure').
It can also use an optional file (typically called `config.cache'
and enabled with `--cache-file=config.cache' or simply `-C') that saves
the results of its tests to speed up reconfiguring. (Caching is
disabled by default to prevent problems with accidental use of stale
cache files.)
If you need to do unusual things to compile the package, please try
to figure out how `configure' could check whether to do them, and mail
diffs or instructions to the address given in the `README' so they can
be considered for the next release. If you are using the cache, and at
some point `config.cache' contains results you don't want to keep, you
may remove or edit it.
The file `configure.ac' (or `configure.in') is used to create
`configure' by a program called `autoconf'. You only need
`configure.ac' if you want to change it or regenerate `configure' using
a newer version of `autoconf'.
The simplest way to compile this package is:
1. `cd' to the directory containing the package's source code and type
`./configure' to configure the package for your system. If you're
using `csh' on an old version of System V, you might need to type
`sh ./configure' instead to prevent `csh' from trying to execute
`configure' itself.
Running `configure' takes awhile. While running, it prints some
messages telling which features it is checking for.
2. Type `make' to compile the package.
3. Optionally, type `make check' to run any self-tests that come with
the package.
4. Type `make install' to install the programs and any data files and
documentation.
5. You can remove the program binaries and object files from the
source code directory by typing `make clean'. To also remove the
files that `configure' created (so you can compile the package for
a different kind of computer), type `make distclean'. There is
also a `make maintainer-clean' target, but that is intended mainly
for the package's developers. If you use it, you may have to get
all sorts of other programs in order to regenerate files that came
with the distribution.
Compilers and Options
=====================
Some systems require unusual options for compilation or linking that the
`configure' script does not know about. Run `./configure --help' for
details on some of the pertinent environment variables.
You can give `configure' initial values for configuration parameters
by setting variables in the command line or in the environment. Here
is an example:
./configure CC=c89 CFLAGS=-O2 LIBS=-lposix
*Note Defining Variables::, for more details.
Compiling For Multiple Architectures
====================================
You can compile the package for more than one kind of computer at the
same time, by placing the object files for each architecture in their
own directory. To do this, you must use a version of `make' that
supports the `VPATH' variable, such as GNU `make'. `cd' to the
directory where you want the object files and executables to go and run
the `configure' script. `configure' automatically checks for the
source code in the directory that `configure' is in and in `..'.
If you have to use a `make' that does not support the `VPATH'
variable, you have to compile the package for one architecture at a
time in the source code directory. After you have installed the
package for one architecture, use `make distclean' before reconfiguring
for another architecture.
Installation Names
==================
By default, `make install' will install the package's files in
`/usr/local/bin', `/usr/local/man', etc. You can specify an
installation prefix other than `/usr/local' by giving `configure' the
option `--prefix=PREFIX'.
You can specify separate installation prefixes for
architecture-specific files and architecture-independent files. If you
give `configure' the option `--exec-prefix=PREFIX', the package will
use PREFIX as the prefix for installing programs and libraries.
Documentation and other data files will still use the regular prefix.
In addition, if you use an unusual directory layout you can give
options like `--bindir=DIR' to specify different values for particular
kinds of files. Run `configure --help' for a list of the directories
you can set and what kinds of files go in them.
If the package supports it, you can cause programs to be installed
with an extra prefix or suffix on their names by giving `configure' the
option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'.
Optional Features
=================
Some packages pay attention to `--enable-FEATURE' options to
`configure', where FEATURE indicates an optional part of the package.
They may also pay attention to `--with-PACKAGE' options, where PACKAGE
is something like `gnu-as' or `x' (for the X Window System). The
`README' should mention any `--enable-' and `--with-' options that the
package recognizes.
For packages that use the X Window System, `configure' can usually
find the X include and library files automatically, but if it doesn't,
you can use the `configure' options `--x-includes=DIR' and
`--x-libraries=DIR' to specify their locations.
Specifying the System Type
==========================
There may be some features `configure' cannot figure out automatically,
but needs to determine by the type of machine the package will run on.
Usually, assuming the package is built to be run on the _same_
architectures, `configure' can figure that out, but if it prints a
message saying it cannot guess the machine type, give it the
`--build=TYPE' option. TYPE can either be a short name for the system
type, such as `sun4', or a canonical name which has the form:
CPU-COMPANY-SYSTEM
where SYSTEM can have one of these forms:
OS KERNEL-OS
See the file `config.sub' for the possible values of each field. If
`config.sub' isn't included in this package, then this package doesn't
need to know the machine type.
If you are _building_ compiler tools for cross-compiling, you should
use the `--target=TYPE' option to select the type of system they will
produce code for.
If you want to _use_ a cross compiler, that generates code for a
platform different from the build platform, you should specify the
"host" platform (i.e., that on which the generated programs will
eventually be run) with `--host=TYPE'.
Sharing Defaults
================
If you want to set default values for `configure' scripts to share, you
can create a site shell script called `config.site' that gives default
values for variables like `CC', `cache_file', and `prefix'.
`configure' looks for `PREFIX/share/config.site' if it exists, then
`PREFIX/etc/config.site' if it exists. Or, you can set the
`CONFIG_SITE' environment variable to the location of the site script.
A warning: not all `configure' scripts look for a site script.
Defining Variables
==================
Variables not defined in a site shell script can be set in the
environment passed to `configure'. However, some packages may run
configure again during the build, and the customized values of these
variables may be lost. In order to avoid this problem, you should set
them in the `configure' command line, using `VAR=value'. For example:
./configure CC=/usr/local2/bin/gcc
will cause the specified gcc to be used as the C compiler (unless it is
overridden in the site shell script).
`configure' Invocation
======================
`configure' recognizes the following options to control how it operates.
`--help'
`-h'
Print a summary of the options to `configure', and exit.
`--version'
`-V'
Print the version of Autoconf used to generate the `configure'
script, and exit.
`--cache-file=FILE'
Enable the cache: use and save the results of the tests in FILE,
traditionally `config.cache'. FILE defaults to `/dev/null' to
disable caching.
`--config-cache'
`-C'
Alias for `--cache-file=config.cache'.
`--quiet'
`--silent'
`-q'
Do not print messages saying which checks are being made. To
suppress all normal output, redirect it to `/dev/null' (any error
messages will still be shown).
`--srcdir=DIR'
Look for the package's source code in directory DIR. Usually
`configure' can determine that directory automatically.
`configure' also accepts some other, not widely useful, options. Run
`configure --help' for more details.
================================================
FILE: ApMon/Makefile
================================================
# Packaging helpers for the eos-apmon RPM.
# Package name and version are parsed out of the single .spec file that
# lives next to this Makefile.
SPECFILE = $(shell find . -maxdepth 1 -type f -name '*.spec' )
DIST ?= $(shell rpm --eval %{dist})
RPMBUILD = $(shell pwd)/rpmbuild
PACKAGE = $(shell awk '$$1 == "Name:" { print $$2 }' $(SPECFILE) )
VERSION = $(shell awk '$$1 == "Version:" { print $$2 }' $(SPECFILE) )
# Perl site-arch directory, e.g. /usr/lib64/perl5 - target for the ApMon modules.
PERLDIR = $(shell perl -V:installsitearch | cut -d "'" -f 2)
INSTALL ?= install
DESTDIR ?= $(RPMBUILD)/BUILDROOT

# Remove the staging directory, generated tarballs and the local rpmbuild tree.
clean:
	rm -rf $(PACKAGE)-$(VERSION)
	rm -rf eos-apmon-*.tar.gz
	rm -rf $(RPMBUILD)

# Build the source tarball <name>-<version>.tar.gz from a clean staging copy.
dist: clean
	mkdir -p $(PACKAGE)-$(VERSION)
	rsync -aC --exclude '.__afs*' --exclude $(PACKAGE)-$(VERSION) . $(PACKAGE)-$(VERSION)
	tar cpfz ./$(PACKAGE)-$(VERSION).tar.gz $(PACKAGE)-$(VERSION)

# Install everything under DESTDIR; invoked from the spec file with
# DESTDIR set to the rpm build root.
install:
	mkdir -p $(DESTDIR)/perl/ApMon/ApMon/
	mkdir -p $(DESTDIR)/etc/logrotate.d/
	mkdir -p $(DESTDIR)/opt/eos/apmon
	mkdir -p $(DESTDIR)/etc/sysconfig/
	mkdir -p $(DESTDIR)/var/log/eos
	mkdir -p $(DESTDIR)/$(PERLDIR)/ApMon/ApMon
	mkdir -p $(DESTDIR)/usr/sbin/
	mkdir -p $(DESTDIR)/usr/lib/systemd/system/
	mkdir -p $(DESTDIR)/var/log/eos/apmon
	cd perl; for name in `find . -type f | grep -v svn`; do $(INSTALL) -m 755 $$name $(DESTDIR)/$(PERLDIR)/$$name; done
	$(INSTALL) -m 644 usr/lib/systemd/system/eosapmond.service $(DESTDIR)/usr/lib/systemd/system/
	$(INSTALL) -m 755 opt/eos/apmon/eosapmond $(DESTDIR)/opt/eos/apmon/eosapmond
	$(INSTALL) -m 644 etc/logrotate.d/eosapmond $(DESTDIR)/etc/logrotate.d/eosapmond
	$(INSTALL) -m 755 run.sh $(DESTDIR)/opt/eos/apmon/run.sh

# Create the local rpmbuild directory layout and drop the sources in it.
prepare: dist
	mkdir -p $(RPMBUILD)/RPMS/$(DIST)
	mkdir -p $(RPMBUILD)/SRPMS/
	mkdir -p $(RPMBUILD)/SPECS/
	mkdir -p $(RPMBUILD)/SOURCES/
	mkdir -p $(RPMBUILD)/BUILD/
	cp eos-apmon-*.tar.gz $(RPMBUILD)/SOURCES
	cp $(SPECFILE) $(RPMBUILD)/SOURCES

# Build the source rpm from the tarball (-ts).
srpm: prepare $(SPECFILE)
	rpmbuild --define "_source_filedigest_algorithm md5" --define "_binary_filedigest_algorithm md5" \
	--define "_topdir $(RPMBUILD)" -ts $(RPMBUILD)/SOURCES/eos-apmon-*.tar.gz

# Rebuild binary rpms from the source rpm produced by `srpm`.
rpm: srpm
	rpmbuild --rebuild --define "_rpmdir $(RPMBUILD)/RPMS/" \
	--define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" rpmbuild/SRPMS/eos-apmon-*.src.rpm
================================================
FILE: ApMon/NEWS
================================================
================================================
FILE: ApMon/README
================================================
================================================
FILE: ApMon/eos-apmon.spec
================================================
# RPM spec for eos-apmon: ApMon (MonALISA) monitoring helper scripts for EOS,
# run as a systemd service (eosapmond.service).
# Fallback: derive the perl site-arch directory when the macro is not predefined.
%{!?perl_sitearch: %define perl_sitearch %(eval "`%{__perl} -V:installsitearch`"; echo $installsitearch)}
# Do not fail the build for installed-but-unlisted files.
%define _unpackaged_files_terminate_build 0
# Skip the post-install brp scripts (stripping etc.) - only scripts are shipped.
%define __os_install_post /bin/true
# No debuginfo sub-package.
%define debug_package %{nil}
Summary: eos-apmon package
Name: eos-apmon
Version: 1.1.13
Release: 1%{?dist}
URL: none
Source0: %{name}-%{version}.tar.gz
License: OpenSource
Group: Applications/Eos
BuildRequires: systemd-rpm-macros
Requires: perl
%description
This package contains service scripts for ML monitoring in EOS
The service is started via systemd
systemctl start | stop | status | restart eosapmond.service
The initd scripts were done by Andreas-Joachim Peters [CERN] (EMAIL: andreas.joachim.peters@cern.ch).
%prep
%setup -q
%install
# All files are laid out into the build root by the top-level Makefile.
rm -rf %{buildroot}
mkdir -p %{buildroot}
%{__make} install DESTDIR=%{buildroot}
%post
%systemd_post eosapmond.service
%preun
%systemd_preun eosapmond.service
%postun
%systemd_postun_with_restart eosapmond.service
%files
%defattr(-,root,root)
/%{_unitdir}/eosapmond.service
/etc/logrotate.d/eosapmond
%{perl_sitearch}/ApMon/
/opt/eos/apmon/eosapmond
/opt/eos/apmon/run.sh
%changelog
* Mon Apr 28 2025 Martin Vala - 1.1.13-1
- Xrootd version is parsed from eos-xrootd package
* Wed Mar 19 2025 Gianmaria Del Monte - 1.1.12-1
- Move to systemd service
* Fri Jan 26 2024 Volodymyr Yurchenko - 1.1.11-1
- install systemd unit file compatible with Alma 9
* Wed Aug 4 2021 Elvin Sindrilaru - 1.1.10-1
- move the apmon logs out of the EOS FST owned directory and
place them in /var/log/eos/apmon/
- bump version to 1.1.10
* Fri Dec 6 2019 Cristian Contescu - 1.1.9-1
- add fix for interface detection (fix traffic reporting)
* Wed Apr 2 2014 root - 1.1.4-1
- add "_xrootd_" to the instance name
- fix RPM version discovery for EOS and XRootD packages
* Mon Mar 12 2011 root - 1.1.0-0
- Initial build.
================================================
FILE: ApMon/etc/logrotate.d/eosapmond
================================================
# Logrotate policy for the log written by the eosapmond ApMon daemon.
/var/log/eos/apmon/apmon.log {
	# Do not report an error if the log file is absent.
	missingok
	daily
	# Copy then truncate in place, so the daemon keeps its open file handle.
	copytruncate
	create 755 root root
	dateext
	# Keep 200 rotated, compressed copies.
	rotate 200
	compress
}
================================================
FILE: ApMon/jenkins-build.sh
================================================
#!/bin/bash
#-------------------------------------------------------------------------------
# @author Elvin-Alin Sindrilaru - CERN
# @brief Script used by Jenkins to build EOS ApMon rpms
#-------------------------------------------------------------------------------
#************************************************************************
# * EOS - the CERN Disk Storage System *
# * Copyright (C) 2016 CERN/Switzerland *
# * *
# * This program is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation, either version 3 of the License, or *
# * (at your option) any later version. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program.  If not, see <http://www.gnu.org/licenses/>. *
# ************************************************************************/
#-------------------------------------------------------------------------------
# Print the usage summary on stderr.
#-------------------------------------------------------------------------------
function printHelp()
{
  cat 1>&2 <<EOF
Usage: 
${0} 
 branch name in the form of "origin/master" or tag
 name e.g. 1.0.0 for which to build the project 
 XRootD tag version used for this build 
 build number value passed in by Jenkins 
 build platform e.g. slc-6, el-7, fc-24 
 build architecture e.g. x86_64, i386 
 destination path for the rpms built 
EOF
}
#-------------------------------------------------------------------------------
# Get the local branch name and dist tag for the rpms. For example local branch
# name of branch 'origin/master' is master. The dist tag for Scientific Linux 5
# can be 'slc5' or 'el5'.
# Function sets two global variables BRANCH and DIST.
#-------------------------------------------------------------------------------
function getLocalBranchAndDistTag()
{
if [[ ${#} -ne 2 ]]; then
echo "Usage: " 1>&2
echo "${0} " 1>&2
echo " branch name in the form of \"origin/master\" or tag" 1>&2
echo " name e.g. 1.0.0 for which to build the project " 1>&2
echo " build platform e.g. slc-6, el-7, fc-24 " 1>&2
exit 1
fi
local BRANCH_OR_TAG=${1}
local PLATORM=${2}
local TAG_REGEX="^[04]+\..*$"
local TAG_REGEX_CITRINE="^4.*$"
# If this is a tag get the branch it belogs to
if [[ "${BRANCH_OR_TAG}" =~ ${TAG_REGEX} ]]; then
if [[ "${BRANCH_OR_TAG}" =~ ${TAG_REGEX_CITRINE} ]]; then
BRANCH="citrine"
fi
else
BRANCH=$(basename ${BRANCH_OR_TAG})
if [[ "${BRANCH}" == "master" ]]; then
BRANCH="citrine"
fi
fi
# For any other branch use the latest XRootD release
XROOTD_TAG="v4.3.0"
DIST=".${PLATFORM}"
# Remove any "-" from the dist tag
DIST="${DIST//-}"
echo "Local branch: ${BRANCH}"
echo "Dist tag: ${DIST} "
}
#-------------------------------------------------------------------------------
# Main - when we are called the current BRANCH_OR_TAG is already checked-out and
# the script must be run from the **same directory** where it resides.
#-------------------------------------------------------------------------------
if [[ ${#} -ne 6 ]]; then
  printHelp
  exit 1
fi
# Positional arguments - see printHelp for their meaning.
BRANCH_OR_TAG=${1}
XROOTD_TAG=${2}
BUILD_NUMBER=${3}
PLATFORM=${4}
ARCHITECTURE=${5}
DST_PATH=${6}
echo "Build number: ${BUILD_NUMBER}"
echo "Branch or tag: ${BRANCH_OR_TAG}"
echo "XRootD tag: ${XROOTD_TAG}"
echo "Build platform: ${PLATFORM}"
echo "Build architecture: ${ARCHITECTURE}"
echo "Destination path: ${DST_PATH}"
echo "Running in directory: $(pwd)"
# Get local branch and dist tag for the RPMS
# (sets the BRANCH and DIST globals; also overwrites XROOTD_TAG - see function)
getLocalBranchAndDistTag ${BRANCH_OR_TAG} ${PLATFORM}
# Move to ApMon directory and create the tarball
cd ApMon
./maketar.sh
# Build the source RPM
rpmbuild --define "_source_filedigest_algorithm md5" --define "_binary_filedigest_algorithm md5" --define "_topdir ./rpmbuild" -ts eos-apmon-*.tar.gz
# Move the source RPM
mv rpmbuild/SRPMS/eos-apmon-*.src.rpm .
# Get the mock configurations from gitlab
git clone ssh://git@gitlab.cern.ch:7999/dss/dss-ci-mock.git ../dss-ci-mock
# Prepare the mock configuration by filling in the template placeholders
cat ../dss-ci-mock/eos-templates/${PLATFORM}-${ARCHITECTURE}.cfg.in | sed "s/__XROOTD_TAG__/${XROOTD_TAG}/" | sed "s/__BUILD_NUMBER__/${BUILD_NUMBER}/" > eos.cfg
# Build the RPMs
mock --yum --init --uniqueext="eos-apmon01" -r ./eos.cfg --rebuild ./eos-apmon-*.src.rpm --resultdir ../rpms -D "dist ${DIST}"
# List of branches for CI YUM repo
BRANCH_LIST=('citrine')
# If building one of the production branches then push rpms to YUM repo
if [[ ${BRANCH_LIST[*]} =~ $BRANCH ]] ; then
  cd ../rpms/
  # Make sure the directories are created and rebuild the YUM repo
  YUM_REPO_PATH="${DST_PATH}/${BRANCH}/tag/${PLATFORM}/${ARCHITECTURE}"
  echo "Save ApMon RPMs in YUM repo: ${YUM_REPO_PATH}"
  # Get AFS tokens before writing into the repository area
  aklog
  mkdir -p ${YUM_REPO_PATH}
  cp -f *.rpm ${YUM_REPO_PATH}
  createrepo --update -q ${YUM_REPO_PATH}
else
  echo "RPMs for branch ${BRANCH} are NOT saved in any YUM repository!"
fi
================================================
FILE: ApMon/maketar.sh
================================================
#!/bin/sh
# Build the source tarball <name>-<version>.tar.gz for the package described
# by the single .spec file next to this script. Name and version are taken
# from the spec's Name: and Version: tags.
set -e
# Extract package related information
specfile=$(find . -maxdepth 1 -name '*.spec' -type f)
name=$(awk '$1 == "Name:" { print $2 }' "${specfile}")
version=$(awk '$1 == "Version:" { print $2 }' "${specfile}")
# Create the distribution tarball from a clean staging directory
rm -rf "${name}-${version}"
rsync -aC --exclude '.__afs*' . "${name}-${version}"
tar -zcf "${name}-${version}.tar.gz" "${name}-${version}"
rm -rf "${name}-${version}"
================================================
FILE: ApMon/opt/eos/apmon/eosapmond
================================================
#!/usr/bin/perl
# apmonpl
# eosapmond - ApMon background-monitoring daemon for EOS.
# Expects exactly 6 arguments:
#   ARGV[0] - monitoring destination handed to ApMon setDestinations()
#   ARGV[1] - log file (stdout and stderr are appended to it)
#   ARGV[2] - ApMon log level
#   ARGV[3] - cluster name prefix (used as "<prefix>_xrootd_Nodes")
#   ARGV[4] - node name
#   ARGV[5] - pid of the process registered via addJobToMonitor()
# NOTE(review): the usage string below looks truncated - the per-argument
# placeholders appear lost; confirm against the original script in git history.
if (@ARGV != 6) {
	print "Usage: $0 ";
	exit(1);
}
# Redirect stdout and stderr to log file
if (!open(STDOUT, ">>", $ARGV[1])) {
	print STDERR "ERROR: cannot stream stdout into $ARGV[1]\n";
	exit(-1);
}
STDOUT->autoflush(1);
if (!open(STDERR, ">>", $ARGV[1])) {
	print STDERR "ERROR: cannot stream stderr into $ARGV[1]\n";
	exit(-1);
}
STDERR->autoflush(1);
# Detach from the controlling terminal by starting a new session.
use POSIX qw(setsid);
my $sid = setsid();
if ($sid < 0) {
	print STDERR "ERROR: failed to create new session (setsid())\n";
	exit(-1);
}
use strict;
use warnings;
use ApMon;
# ApMon(0) - see ApMon.pm; monitoring is driven manually from the loop below.
my $apm = new ApMon(0);
my $now = `date`;
chomp $now;
printf "# Starting at $now\n";
# Force unbuffered output on both streams.
select STDOUT; $| = 1;
select STDERR; $| = 1;
$apm->setLogLevel($ARGV[2]);
$apm->setDestinations(["$ARGV[0]"]);
$apm->setMonitorClusterNode("$ARGV[3]_xrootd_Nodes", "$ARGV[4]");
$apm->addJobToMonitor($ARGV[5], '', 'xrootd_Services', "$ARGV[4]");
# Report background monitoring data every two minutes, forever.
while(1){
	$apm->sendBgMonitoring();
	sleep(120);
}
================================================
FILE: ApMon/perl/ApMon/ApMon/BgMonitor.pm
================================================
package ApMon::BgMonitor;
use strict;
use warnings;
use ApMon::Common qw(logger);
use ApMon::ProcInfo;
use Data::Dumper;
use Net::Domain;
# Settings for Data::Dumper's dump of last values
$Data::Dumper::Indent = 1;
$Data::Dumper::Purity = 1;
# Construct a background-monitor instance.
# Arguments: command pipe, configuration file, file used to persist the last
# reported values, whether background processes are allowed, and the
# configuration loader object.
sub new {
	my ($type, $cmdPipe, $confFile, $lastValuesFile, $allowBgProcs, $confLoader) = @_;
	my $self = bless({}, __PACKAGE__);
	%$self = (
		CMD_PIPE             => $cmdPipe,
		CONF_FILE            => $confFile,
		LAST_VALUES_FILE     => $lastValuesFile,
		ALLOW_BG_PROCESSES   => $allowBgProcs,
		CONFIG_LOADER        => $confLoader,
		LAST_CONF_CHECK_TIME => 0,
		CONF_RECHECK         => 1,
		CONF_CHECK_INTERVAL  => 20,
		SEND_BG_MONITORING   => 0,
		# Default reporting identity: generic system-monitor cluster, FQDN node.
		BG_MONITOR_CLUSTER   => "ApMon_SysMon",
		BG_MONITOR_NODE      => Net::Domain::hostfqdn(),
		JOBS                 => {},
		PROC_INFO            => ApMon::ProcInfo->new(),
	);
	return $self;
}
# Main loop of a dedicated background-monitoring child process - never returns.
# Commands arrive from the parent over CMD_PIPE; data is only reported once a
# bg_enable message has switched SEND_BG_MONITORING on.
sub run {
	my ($self) = @_;
	sleep(1);
	for (;;) {
		my $parentMsg = ApMon::Common::readMessage($self->{CMD_PIPE});
		if ($parentMsg) {
			$self->parseParentMessage($parentMsg);
		}
		$self->sendBgMonitoring() if $self->{SEND_BG_MONITORING};
		sleep(10); # updates should never be more often than this!
	}
}
# Register one more job (pid) for monitoring; callable by the user or from
# the parent-message handler.
sub addJobToMonitor {
	my ($self, $pid, $workDir, $clusterName, $nodeName) = @_;
	my $job = ($self->{JOBS}->{$pid} ||= {});
	$job->{CLUSTER} = $clusterName;
	$job->{NODE}    = $nodeName;
	$self->{PROC_INFO}->addJobToMonitor($pid, $workDir);
}
# Drop a job (pid) from the monitored set; callable by the user or from the
# parent-message handler.
sub removeJobToMonitor {
	my ($self, $pid) = @_;
	delete $self->{JOBS}{$pid};
	$self->{PROC_INFO}->removeJobToMonitor($pid);
}
# Set the cluster/node names under which system-level information is reported.
sub setMonitorClusterNode {
	my ($self, $clusterName, $nodeName) = @_;
	@{$self}{qw(BG_MONITOR_CLUSTER BG_MONITOR_NODE)} = ($clusterName, $nodeName);
}
# Switch the sending of background-monitoring information on or off.
sub enableBgMonitoring {
	my ($self, $flag) = @_;
	$self->{SEND_BG_MONITORING} = $flag;
}
# Delegate the log-level change to the shared ApMon::Common logger.
sub setLogLevel {
	my ($self, $newLevel) = @_;
	ApMon::Common::setLogLevel($newLevel);
}
# Delegate the user-message rate limit to ApMon::Common.
sub setMaxMsgRate {
	my ($self, $msgsPerSec) = @_;
	ApMon::Common::setMaxMsgRate($msgsPerSec);
}
# Delegate the SI2k factor of this machine to ApMon::Common.
sub setCpuSI2k {
	my ($self, $si2kFactor) = @_;
	ApMon::Common::setCpuSI2k($si2kFactor);
}
# Record the CPU speed (MHz) detected while probing the cpu type for si2k.
sub setCpuMHz {
	my ($self, $speedMHz) = @_;
	$ApMon::Common::CpuMHz = $speedMHz;
}
# This is used only if BgMonitor is used as a dedicated monitoring process in order to interpret
# messages from parent process.
# The message is a newline-separated list of "key:value" commands. Simple
# commands (loglevel, maxMsgRate, bg_enable, cpu_si2k, cpu_mhz, rm_pid) take
# effect immediately; pid/work_dir/bg_cluster only accumulate local state that
# is committed when the closing "bg_node:" line of the record arrives.
sub parseParentMessage {
	my ($this, $msg) = @_;
	my ($pid, $workDir, $cluster, $node);
	my @msgs = split(/\n/, $msg);
	for $msg (@msgs){
		$this->setLogLevel($1) if $msg =~ /loglevel:(.*)/;
		$this->setMaxMsgRate($1) if $msg =~ /maxMsgRate:(.*)/;
		$this->enableBgMonitoring($1) if $msg =~ /bg_enable:(.*)/;
		$this->setCpuSI2k($1) if $msg =~ /cpu_si2k:(.*)/;
		$this->setCpuMHz($1) if $msg =~ /cpu_mhz:(.*)/;
		# NOTE(review): this unanchored pattern also matches "rm_pid:..." lines,
		# which would leave $pid set after a removal - confirm the wire format
		# guarantees the key starts the line.
		$pid = $1 if $msg =~ /pid:(.*)/;
		$this->removeJobToMonitor($1) if $msg =~ /rm_pid:(.*)/;
		$workDir = $1 if $msg =~ /work_dir:(.*)/;
		$cluster = $1 if $msg =~ /bg_cluster:(.*)/;
		if($msg =~ /bg_node:(.*)/){
			$node = $1;
			# A pending pid means this record registers a job to monitor.
			if(defined $pid){
				$this->addJobToMonitor($pid, $workDir, $cluster, $node);
				undef $pid;
				undef $cluster;
			}
			# A pending cluster without a pid re-targets the system-monitoring
			# cluster/node identity instead.
			if(defined $cluster){
				$this->setMonitorClusterNode($cluster, $node);
				undef $cluster;
			}
		}
	}
}
# This will send the background information to the interested listeners. It is called either from backgroundMonitor
# or directly by the user from time to time to avoid having a separate process for this task.
# information is about the system (load, network, memory etc.) and about a number of jobs (PIDs).
#
# If $mustSend is != 0, the bgMonitoring data is sent regardless of when it was last time sent. This allows
# sending a 'last result', just before the end of a job, and which can happen anytime.
sub sendBgMonitoring {
	my $this = shift;
	my $mustSend = shift || 0;
	ApMon::Common::updateConfig($this);
	my (@crtSysParams, @crtJobParams, $now, @sys_results, @job_results, $optsRef, $prevRawData);
	$now = time;
	# ProcInfo is refreshed at most once per call, and only if something is due.
	my $updatedProcInfo = 0;
	for my $dest (keys %{$this->{DESTINATIONS}}) {
		$optsRef = $this->{DESTINATIONS}->{$dest}->{OPTS};
		$prevRawData = $this->{DESTINATIONS}->{$dest}->{PREV_RAW_DATA};
		@crtSysParams = ();
		@crtJobParams = ();
		# for each destination and its options, check if we have to do any background monitoring
		# System parameters: collect the enabled "sys_*" option names (prefix
		# stripped), skipping the bookkeeping keys.
		if($optsRef->{'sys_monitoring'} and ($mustSend or $optsRef->{'sys_data_sent'} + $optsRef->{'sys_interval'} <= $now)){
			for my $param (keys %$optsRef){
				if($param =~ /^sys_(.+)/ and $optsRef->{$param}){
					push(@crtSysParams, $1) unless ($1 eq 'monitoring') or ($1 eq 'interval') or ($1 eq 'data_sent');
				}
			}
			$optsRef->{'sys_data_sent'} = $now;
		}
		# Job parameters: same scheme for the "job_*" options.
		if($optsRef->{'job_monitoring'} and ($mustSend or $optsRef->{'job_data_sent'} + $optsRef->{'job_interval'} <= $now)){
			for my $param (keys %$optsRef){
				if($param =~ /^job_(.+)/ and $optsRef->{$param}){
					push(@crtJobParams, "$1") unless ($1 eq 'monitoring') or ($1 eq 'interval') or ($1 eq 'data_sent');
				}
			}
			$optsRef->{'job_data_sent'} = $now;
		}
		# General host info: every option that is neither sys_* nor job_*,
		# sent once every 2 * sys_interval.
		if($optsRef->{'general_info'} and ($mustSend or $optsRef->{'general_data_sent'} + 2 * $optsRef->{'sys_interval'} <= $now)){
			for my $param (keys %$optsRef){
				if(!($param =~ /^sys_/) and !($param =~ /^job_/) and ($optsRef->{$param})){
					push(@crtSysParams, $param) unless ($param eq 'general_info') or ($param eq 'general_data_sent');
				}
			}
			$optsRef->{'general_data_sent'} = $now;
		}
		if((! $updatedProcInfo) and (@crtSysParams > 0 or @crtJobParams > 0)){
			$this->{PROC_INFO}->update();
			$updatedProcInfo = 1;
		}
		# Send the system-level values (timestamp -1 lets the receiver stamp them)
		# and remember them as the last reported values.
		@sys_results = ( @crtSysParams ? $this->{PROC_INFO}->getSystemData(\@crtSysParams, $prevRawData) : () );
		if(@sys_results){
			ApMon::Common::directSendParameters($dest, $this->{BG_MONITOR_CLUSTER}, $this->{BG_MONITOR_NODE}, -1, \@sys_results);
			$this->{LAST_VALUES}->{BG_MON_VALUES} = {} if ! $this->{LAST_VALUES}->{BG_MON_VALUES};
			$this->update_hash($this->{LAST_VALUES}->{BG_MON_VALUES}, \@sys_results);
		}
		# Same for every monitored job, under its own cluster/node identity.
		for my $pid (keys %{$this->{JOBS}}){
			@job_results = ( @crtJobParams ? $this->{PROC_INFO}->getJobData($pid, \@crtJobParams) : () );
			if(@job_results){
				ApMon::Common::directSendParameters($dest, $this->{JOBS}->{$pid}->{CLUSTER},$this->{JOBS}->{$pid}->{NODE},-1,\@job_results);
				$this->{LAST_VALUES}->{JOBS}->{$pid}->{BG_MON_VALUES} = {} if ! $this->{LAST_VALUES}->{JOBS}->{$pid}->{BG_MON_VALUES};
				$this->update_hash($this->{LAST_VALUES}->{JOBS}->{$pid}->{BG_MON_VALUES}, \@job_results);
			}
		}
	}
	# Persist the last reported values (owner-only readable) so they can be
	# inspected/restored later; failure to write is only a warning.
	if(open(F, ">$this->{LAST_VALUES_FILE}")){
		print F Dumper($this->{LAST_VALUES});
		close F;
		chmod(0600, $this->{LAST_VALUES_FILE});
	}else{
		logger("WARNING", "Cannot save last BgMonitored values to $this->{LAST_VALUES_FILE}");
	}
}
# Merge a flat list of key/value pairs (array ref) into the given hash ref.
# The params array is consumed (shifted empty) in the process.
sub update_hash {
	my $this = shift;
	my $hash = shift || {} ;
	my $params = shift;
	# An odd-length list cannot be interpreted as pairs - warn and bail out.
	# (The old `... and logger(...) and return` chain only returned when
	# logger() happened to return a true value, i.e. only when the WARNING
	# level was actually printed; return unconditionally instead.)
	if(@$params & 1){
		logger("WARNING", "Odd number of parameters in update_hash");
		return;
	}
	while(@$params){
		my $key = shift(@$params);
		my $val = shift(@$params);
		$hash->{$key} = $val;
	}
}
1;
================================================
FILE: ApMon/perl/ApMon/ApMon/Common.pm
================================================
package ApMon::Common;
use strict;
use warnings;
require Exporter;
use Carp qw(cluck);
use Socket;
use ApMon::XDRUtils;
use Data::Dumper;
use Sys::Hostname;
use vars qw(@ISA @EXPORT @EXPORT_OK $APMON_DEFAULT_PORT $VERSION %defaultOptions $KSI2K $CpuMHz);
push @ISA, qw(Exporter);
push @EXPORT, qw(logger);
push @EXPORT_OK, qw($APMON_DEFAULT_PORT %defaultOptions);
$VERSION = "2.2.18";
$APMON_DEFAULT_PORT = 8884;
my @LOG_LEVELS = ("DEBUG", "NOTICE", "INFO", "WARNING", "ERROR", "FATAL");
my $CRT_LOGLEVEL = 2; # index in the array above
my $MAX_MSG_RATE = 20; # Default value for max nr. of messages that user is allowed to send, per second
$KSI2K = undef; # kilo spec ints 2k for this machine
$CpuMHz = undef; # Cpu Speed when taking the speed for KSI2k
# Default options for background monitoring: which system/job/host metrics are
# reported and how often. Each flag can be overridden per destination.
%defaultOptions = (
	'job_monitoring' => 1, # perform (or not) job monitoring
	'job_interval' => 60, # at this interval (in seconds)
	'job_data_sent' => 0, # time from Epoch when job information was sent; don't touch!
	'job_cpu_time' => 1, # processor time spent running this job in seconds
	'job_cpu_ksi2k' => 1, # used CPU power in ksi2k units (see SpecInt2000 for details);
	'job_run_time' => 1, # elapsed time from the start of this job in seconds
	'job_run_ksi2k' => 1, # elapsed time in ksi2k units
	'job_cpu_usage' => 1, # current percent of the processor used for this job, as reported by ps
	'job_virtualmem' => 1, # size in KB of the virtual memory occupied by the job, as reported by ps
	'job_rss' => 1, # size in KB of the resident image size of the job, as reported by ps
	'job_mem_usage' => 1, # percent of the memory occupied by the job, as reported by ps
	'job_workdir_size' => 1, # size in MB of the working directory of the job
	'job_disk_total' => 1, # size in MB of the total size of the disk partition containing the working directory
	'job_disk_used' => 1, # size in MB of the used disk partition containing the working directory
	'job_disk_free' => 1, # size in MB of the free disk partition containing the working directory
	'job_disk_usage' => 1, # percent of the used disk partition containing the working directory
	'job_open_files' => 1, # number of open file descriptors
	'job_page_faults_min' => 1, # number of minor page faults in the job
	'job_page_faults_maj' => 1, # number of major page faults in the job
	'sys_monitoring' => 1, # perform (or not) system monitoring
	'sys_interval' => 60, # at this interval (in seconds)
	'sys_data_sent' => 0, # time from Epoch when system information was sent; don't touch!
	'sys_cpu_usr' => 1, # cpu-usage information
	'sys_cpu_sys' => 1, # all these will produce corresponding params without "sys_"
	'sys_cpu_nice' => 1,
	'sys_cpu_idle' => 1,
	'sys_cpu_iowait' => 1,
	'sys_cpu_irq' => 1,
	'sys_cpu_softirq' => 1,
	'sys_cpu_steal' => 1,
	'sys_cpu_guest' => 1,
	'sys_cpu_usage' => 1,
	'sys_interrupts' => 1,
	'sys_context_switches' => 1,
	'sys_load1' => 1, # system load information
	'sys_load5' => 1,
	'sys_load15' => 1,
	'sys_mem_used' => 1, # memory usage information
	'sys_mem_free' => 1,
	'sys_mem_actualfree' => 1, # actually free memory: free + cached + buffers
	'sys_mem_usage' => 1,
	'sys_mem_buffers' => 1,
	'sys_mem_cached' => 1,
	'sys_blocks_in' => 1,
	'sys_blocks_out' => 1,
	'sys_swap_used' => 1, # swap usage information
	'sys_swap_free' => 1,
	'sys_swap_usage' => 1,
	'sys_swap_in' => 1,
	'sys_swap_out' => 1,
	'sys_net_in' => 1, # network transfer in kBps
	'sys_net_out' => 1, # these will produce params called ethX_in, ethX_out, ethX_errs
	'sys_net_errs' => 1, # for each eth interface
	'sys_net_sockets' => 1, # number of opened sockets for each proto => sockets_tcp/udp/unix ...
	'sys_net_tcp_details' => 1, # number of tcp sockets in each state => sockets_tcp_LISTEN, ...
	'sys_processes' => 1, # total processes and processes in each state (R, S, D ...)
	'sys_uptime' => 1, # uptime of the machine, in days (float number)
	'general_info' => 1, # send (or not) general host information once every 2 $sys_interval seconds
	'general_data_sent' => 0, # time from Epoch when general information was sent; don't touch!
	'hostname' => 1,
	'ip' => 1, # will produce _ip params for each physical interface
	'ipv6' => 1, # will produce _ipv6 params for each physical interface
	'kernel_version' => 1,
	'eos_rpm_version' => 1,
	'xrootd_rpm_version' => 1,
	'platform' => 1,
	'os_type' => 1,
	'cpu_MHz' => 1,
	'no_CPUs' => 1, # number of CPUs
	'ksi2k_factor' => 1, # system's ksi2k factor, if known
	'total_mem' => 1,
	'total_swap' => 1,
	'cpu_vendor_id' => 1,
	'cpu_family' => 1,
	'cpu_model' => 1,
	'cpu_model_name' => 1,
	'cpu_cache' => 1,
	'bogomips' => 1);
# Create a UDP socket through which all information is sent.
# This runs once at module load; every destination shares this one socket.
if(! socket(SOCKET, PF_INET, SOCK_DGRAM, getprotobyname("udp"))){
	# $! carries the OS error of a failed socket() call; the previous message
	# interpolated $@, which only holds eval/exception errors and was empty here.
	logger("FATAL", "Cannot create UDP socket $!");
	die;
}
# Simple logger: prints the message when $level is at or above the current
# log level. Unknown levels are silently dropped.
sub logger {
	my ($level, $msg) = @_;
	my $i = 0;
	# Find the index of $level in @LOG_LEVELS. Check the bound FIRST so we
	# never compare against an undefined element (the old order triggered an
	# "uninitialized value" warning for unknown levels under `use warnings`).
	$i++ while ($i < @LOG_LEVELS and $LOG_LEVELS[$i] ne $level);
	if($CRT_LOGLEVEL <= $i and $i < @LOG_LEVELS){
		# Trim localtime() down to "Mon DD HH:MM:SS ".
		my $now =localtime();
		$now =~ s/^\S+\s((\S+\s+){3}).*$/$1/;
		print $now."ApMon[$LOG_LEVELS[$i]]: $msg\n";
	}
}
# Sets the CRT_LOGLEVEL to the index of the given level name; warns and keeps
# the current level when the name is unknown.
sub setLogLevel {
	my $level = shift;
	# Validate before logging: the old code interpolated a possibly-undef
	# $level into the NOTICE message first, producing a warning.
	if(! defined $level){
		cluck("got undefined level from");
		return;
	}
	logger("NOTICE", "Setting loglevel to $level");
	my $i = 0;
	# Bounds check first - avoids comparing against an undefined array element.
	$i++ while ($i < @LOG_LEVELS and $LOG_LEVELS[$i] ne $level);
	if($i < @LOG_LEVELS){
		$CRT_LOGLEVEL = $i;
	}else{
		logger("WARNING", "Unknown log level \"$level\" - ignoring.\n");
	}
}
# Set the maximum number of user messages allowed per second
# (enforced by the shouldSend subroutine).
sub setMaxMsgRate {
	my ($newRate) = @_;
	logger("INFO", "Setting maxMsgRate to $newRate");
	$MAX_MSG_RATE = $newRate;
}
# For each destination, we'll keep a pair (instance_id, seq_nr) that will identify us
my $senderRef = {};
# Per-process instance id; getInstanceID() is defined later in this file, which
# is fine - Perl compiles the whole file before running this top-level code.
my $instance_id = getInstanceID();
# This is used internally to send a set of parameters to a given destination.
# $destination is a "host:port:password" string; $clusterName/$nodeName identify
# the data source; $time is the timestamp (-1 = let the receiver stamp it);
# $paramsRef is a flat key/value list, or holds a single array/hash ref.
sub directSendParameters {
	my ($destination, $clusterName, $nodeName, $time, $paramsRef) = @_;
	my @params;
	if(! defined($paramsRef)){
		logger("WARNING", "Not sending undefined parameters!");
		return;
	}
	if(! defined($time)){
		logger("WARNING", "Not sending the parameters for an undefined time!");
		return;
	}
	# Honour the message rate limit.
	if(! shouldSend()){
		#logger("WARNING", "Not sending since the messages are too often!");
		return;
	}
	# Accept a flat list, a single array ref or a single hash ref.
	if(ref($paramsRef->[0]) eq "ARRAY"){
		@params = @{$paramsRef->[0]};
	}elsif(ref($paramsRef->[0]) eq "HASH"){
		@params = %{$paramsRef->[0]};
	}else{
		@params = @$paramsRef;
	}
	if(@params == 0){
		return;
	}
	$senderRef->{$destination} = {INSTANCE_ID => $instance_id, SEQ_NR => 0} if ! $senderRef->{$destination};
	my $sender = $senderRef->{$destination};
	# Rebuild the instance id: pid in the high bits, the low 16 bits of the
	# original instance id below. FIX: this used the logical operator `&&`
	# instead of the bitwise mask `&`, which forced the low 16 bits to 0xffff
	# (or 0) for every instance instead of keeping them instance-specific.
	$sender->{INSTANCE_ID} = ($$ << 16) | ($sender->{INSTANCE_ID} & 0xffff);
	$sender->{SEQ_NR} = ($sender->{SEQ_NR} + 1) % 2_000_000_000; # wrap around 2 mld
	my ($host, $port, $pass) = split(/:/, $destination);
	logger("NOTICE", "====> $host|$port|$pass/$clusterName/$nodeName".($time != -1 ? " @ $time" : "")." [$sender->{SEQ_NR} # $sender->{INSTANCE_ID}]");
	# Drop any pair whose name or value is undefined.
	for(my $i = 0; $i < @params; $i += 2){
		if(defined($params[$i]) && defined($params[$i+1])){
			logger("NOTICE", " ==> $params[$i] = $params[$i+1]");
		}else{
			logger("NOTICE", " ==> ".(defined($params[$i]) ? $params[$i] : "undef name")." = ".(defined($params[$i+1]) ? $params[$i+1] : "undef value")." <== ignoring pair");
			splice(@params, $i, 2);
			$i-=2;
		}
	}
	# XDR-encode header + ids + parameters and ship it in one UDP datagram.
	my $header = "v:${VERSION}_plp:$pass";
	my $msg = ApMon::XDRUtils::encodeString($header)
		. ApMon::XDRUtils::encodeINT32($sender->{INSTANCE_ID})
		. ApMon::XDRUtils::encodeINT32($sender->{SEQ_NR})
		. ApMon::XDRUtils::encodeParameters($clusterName, $nodeName, $time, @params);
	my $in_addr = inet_aton($host);
	my $in_paddr = sockaddr_in($port, $in_addr);
	my $msg_len = length($msg);
	if(send(SOCKET, $msg, 0, $in_paddr) != $msg_len){
		logger("ERROR", "Could not send UDP datagram to $host:$port");
	}else{
		logger("NOTICE", "~~~~> Packet sent successfully; total size=$msg_len bytes.");
	}
}
# This is called by child processes to read messages (if they exist) from the parent.
# Non-blocking: polls the pipe's file descriptor with a zero-timeout 4-arg
# select() and only sysread()s when data is actually available.
# Returns the message read (up to 1024 bytes), or "" if nothing is pending.
sub readMessage {
my $PIPE = shift;
my ($rin, $win, $ein, $rout, $wout, $eout) = ('', '', '');
my $retMsg = "";
# Mark this pipe's fd in the read bitmask for select().
vec($rin,fileno($PIPE),1) = 1;
$ein = $rin | $win;
# Timeout 0 => pure poll; $nfound > 0 means the pipe is readable right now.
my ($nfound,$timeleft) = select($rout=$rin, $wout=$win, $eout=$ein, 0);
if($nfound){
sysread($PIPE, $retMsg, 1024);
logger("DEBUG", "readMessage: $retMsg");
}
return $retMsg;
}
# This is called by main process to send a message to a child that reads form the given pipe
sub writeMessage {
	my ($PIPE, $msg) = @_;
	unless(defined $PIPE){
		logger("ERROR", "Trying to send '$msg' to child, but the pipe is not defined!");
		return;
	}
	logger("DEBUG", "writeMessage: $msg");
	syswrite($PIPE, $msg);
}
# Copy the "time when last data was sent" markers from one options hash
# to another; only markers that are actually set in the source are copied.
sub updateLastSentTime {
	my ($srcOpts, $dstOpts) = @_;
	for my $marker ('general_data_sent', 'sys_data_sent', 'job_data_sent'){
		$dstOpts->{$marker} = $srcOpts->{$marker} if $srcOpts->{$marker};
	}
}
# This is used to update the configuration for an object that has in it's base hash the following elements
# DESTINATIONS, CONF_RECHECK, LAST_CONF_CHECK_TIME, CONF_CHECK_INTERVAL and CONF_FILE.
# In practice, both ApMon and BgMonitor use it to update their configuration.
# NOTE(review): the while() below has no readline target - the "<CONF>"
# operator appears to have been lost in extraction; confirm against upstream.
sub updateConfig {
my $this = shift;
# Without background processes, just mirror the loader's destinations.
if(! $this->{ALLOW_BG_PROCESSES}){
$this->{DESTINATIONS} = $this->{CONFIG_LOADER}->{DESTINATIONS};
return;
}
my $now = time;
# Skip the refresh if rechecking is disabled and we already have destinations,
# or if the check interval since the last refresh has not elapsed yet.
if((scalar(keys %{$this->{DESTINATIONS}}) > 0 and $this->{CONF_RECHECK} == 0)
or ($this->{LAST_CONF_CHECK_TIME} + $this->{CONF_CHECK_INTERVAL} > $now)){
return;
}
logger("DEBUG", "Updating configuration from $this->{CONF_FILE}");
if(open(CONF, "<$this->{CONF_FILE}")){
my $prevDest = $this->{DESTINATIONS} || {};
$this->{DESTINATIONS} = {}; # clear old destinations first
my ($crtDest, $line);
while($line = ){
chomp $line;
# A "host:port:pass" line starts a new destination: give it a copy of the
# default options and carry over state from the previous configuration.
if($line =~ /^(\S+):(\S+):(\S*)$/){
# reading a new destination
$crtDest = $line;
my %defOpts = %defaultOptions; #get a copy of the default options
$this->{DESTINATIONS}->{$crtDest}->{OPTS} = \%defOpts;
updateLastSentTime($prevDest->{$crtDest}->{OPTS}, $this->{DESTINATIONS}->{$crtDest}->{OPTS});
$this->{DESTINATIONS}->{$crtDest}->{PREV_RAW_DATA} =
($prevDest->{$crtDest}->{PREV_RAW_DATA} ? $prevDest->{$crtDest}->{PREV_RAW_DATA} : {});
logger("DEBUG", "Adding destination $line");
}elsif($line =~ /^\s(\S+)=(\S+)/) {
# reading an attribute for the current destination and modify the current options
my ($name, $value) = ($1, $2);
logger("DEBUG", "Adding $name=$value");
# A few option names are global: they update this object itself instead
# of the per-destination options hash.
if($name eq 'loglevel'){
$this->setLogLevel($value);
}elsif($name eq 'conf_recheck'){
$this->{CONF_RECHECK} = $value;
}elsif($name eq 'recheck_interval'){
$this->{CONF_CHECK_INTERVAL} = $value;
}elsif($name eq 'maxMsgRate'){
$this->setMaxMsgRate($value);
}else{
$this->{DESTINATIONS}->{$crtDest}->{OPTS}->{$name} = $value;
}
}else{
logger("WARNING", "Unknown line in conf file: $line");
}
}
close CONF;
}else{
logger("ERROR", "Error opening temporary config file $this->{CONF_FILE}. Current config is unchanged.");
return;
}
$this->{LAST_CONF_CHECK_TIME} = time;
}
# don't allow a user to send more than MAX_MSG messages per second, in average
# State for the exponentially-weighted rate limiter used by shouldSend():
# prv* hold the smoothed history of completed seconds, crt* count the
# current second.
my $prvTime = 0;
my $prvSent = 0;
my $prvDrop = 0;
my $crtTime = 0;
my $crtSent = 0;
my $crtDrop = 0;
# Weight given to the history vs. the current second (closer to 1 = smoother).
my $hWeight = 0.92;
# Decide if the current datagram should be sent.
# This decision is based on the number of messages previously sent.
# Keeps an exponentially-weighted per-second send rate; once that rate
# approaches $MAX_MSG_RATE, messages are dropped with growing probability.
sub shouldSend {
my $now = time;
if($now != $crtTime){
# new time
# update previous counters;
$prvSent = $hWeight * $prvSent + (1 - $hWeight) * $crtSent / ($now - $crtTime);
$prvTime = $crtTime;
logger("DEBUG", "previously sent: $crtSent; dropped: $crtDrop");
# reset current counter
$crtTime = $now;
$crtSent = 0;
$crtDrop = 0;
}
# Weighted mix of the smoothed history and the current second's counter.
my $valSent = $prvSent * $hWeight + $crtSent * (1 - $hWeight); # compute the history
my $doSend = 1;
# $level is 90% of the max rate; note the test below compares against
# MAX - level (= 10% of max), so the random draw decides the fate of every
# message above that, effectively dropping once $valSent nears the max.
my $level = $MAX_MSG_RATE - $MAX_MSG_RATE / 10; # when we should start dropping messages
if($valSent > $MAX_MSG_RATE - $level){
$doSend = rand($MAX_MSG_RATE / 10) < ($MAX_MSG_RATE - $valSent);
}
# counting sent and dropped messages
if($doSend){
$crtSent++;
}else{
$crtDrop++;
}
return $doSend;
}
# Try to generate a more random instance id. It takes the process ID and
# combines it with the last digit from the IP addess and a random number.
# Returns an integer: (pid << 16) | (ip_last_octet << 8) | random_byte.
sub getInstanceID {
	my $pid = $$;
	my $ip = int(rand(256)); # fallback: random "last digit" of the ip address
	my $host = hostname(); # from Sys::Hostname
	if($host){
		# gethostbyname may fail (e.g. broken DNS); guard it so inet_ntoa is
		# never called with undef, which would croak and kill the caller.
		my $packed = scalar gethostbyname($host);
		if(defined $packed){
			my $addr = inet_ntoa($packed);
			$ip = $1 if $addr =~ /(\d+)$/;
		}
	}
	my $rnd = int(rand(256));
	my $iid = ($pid << 16) | ($ip << 8) | $rnd; # from all this, generate the instance id
	return $iid;
}
# Try to determine the CPU type. Returns a hash with: cpu_model_name, cpu_MHz, cpu_cache (in KB)
# TODO: make this work also for Mac.
# NOTE(review): the open()/while() lines below look truncated - the file paths
# and "<CPU_INFO>"/"<CACHE_INFO>" readline operators seem lost in extraction;
# $CpuMHz, $line and $level3params are presumably declared in the lost text or
# at package level - confirm against the upstream ApMon sources.
sub getCpuType {
my $cpu_type = {};
if(-r "/proc/cpuinfo"){
if(open(CPU_INFO, "){
if($line =~ /cpu MHz\s+:\s+(\d+\.?\d*)/){
$cpu_type->{"cpu_MHz"} = $1;
$CpuMHz = $1;
}
$cpu_type->{"cpu_model_name"} = $1 if($line =~ /model name\s+:\s+(.+)/ || $line =~ /family\s+:\s+(.+)/);
$cpu_type->{"cpu_cache"} = $1 if($line =~ /cache size\s+:\s+(\d+)/);
}
close(CPU_INFO);
}else{
logger("NOTICE", "Cannot open /proc/cpuinfo");
}
}
# On Itanium the L3 cache size comes from the PAL cache info (bytes -> KB).
if(-r "/proc/pal/cpu0/cache_info"){
if(open(CACHE_INFO, "){
$level3params = 1 if($line =~/Cache level 3/);
$cpu_type->{"cpu_cache"} = $1 / 1024 if ($level3params && $line =~ /Size\s+:\s+(\d+)/);
}
close(CACHE_INFO);
}else{
logger("NOTICE", "Cannot open /proc/pal/cpu0/cache_info");
}
}
# Nothing could be determined at all => report failure with undef.
if(! scalar(keys(%$cpu_type))){
logger("INFO", "Cannot get cpu type");
return undef;
}
return $cpu_type;
}
# Set the SI2K performance meter for this machine. If this function is called then parameter
# cpu_ksi2k will also be reported for the job monitoring with a value computed this way:
# cpu_ksi2k(job) = cpu_time(job) * ( si2k / 1000)
sub setCpuSI2k {
	my ($si2k) = @_;
	return unless $si2k;
	$KSI2K = $si2k / 1000.0;
}
1;
================================================
FILE: ApMon/perl/ApMon/ApMon/ConfigLoader.pm
================================================
package ApMon::ConfigLoader;
use strict;
use warnings;
use ApMon::Common qw(logger $APMON_DEFAULT_PORT %defaultOptions);
use Socket;
use Data::Dumper;
use Carp qw(cluck);
# Config Loader constructor.
# $cmdPipe  - pipe on which commands arrive from the parent process
# $confFile - temporary config file shared with the other ApMon processes
sub new {
	my ($type, $cmdPipe, $confFile) = @_;
	my $this = {};
	bless $this;
	$this->{CMD_PIPE} = $cmdPipe;
	$this->{CONF_FILE} = $confFile;
	$this->{LAST_CONF_CHECK_TIME} = 0;
	$this->{CONF_RECHECK} = 1;
	$this->{CONF_CHECK_INTERVAL} = 30; # seconds between two config refreshes
	# http/file locations from where to read the config. Must be an array
	# reference: the former "= ()" assigned undef (empty list in scalar context).
	$this->{DEST_LOCATIONS} = [];
	$this->{DESTINATIONS} = {};
	return $this;
}
# This call will never return!
# It should be a used just from a child process whose role is only configuration refreshing.
sub run {
	my $this = shift;
	my $lastFetchSize = 0;
	while(1){
		# Apply any pending parent commands, then refresh the config if enabled.
		my $parentMsg = ApMon::Common::readMessage($this->{CMD_PIPE});
		$this->parseParentMessage($parentMsg) if $parentMsg;
		$lastFetchSize = $this->refreshConfig($lastFetchSize) if $this->{CONF_RECHECK};
		sleep($this->{CONF_CHECK_INTERVAL});
	}
}
# This allows setting the configuration. It can be used with several arguments:
# - list of strings (URLs and/or files) - the configuration will be read from all
# - reference to an ARRAY - each element is a destination ML service; for each destination
# the default options will be used
# - reference to a HASH - each key is a destination ML service; for each destination you can
# define a set of additional options that will overwrite the default ones.
sub setDestinations {
	my ($this, @destLocations) = @_;
	my $prevDest = $this->{DESTINATIONS};
	$this->{DESTINATIONS} = {};
	# determine the way we were instantiated and initalize accordingly
	if(ref($destLocations[0]) eq "ARRAY"){
		# user gave a reference to an array, each element being a destination (host[:port][ pass])
		# we will send datagrams to all valid destinations (i.e. host can be resolved), with default options
		$this->{CONF_RECHECK} = 0;
		for my $destStr (@{$destLocations[0]}) {
			my $dest = $this->parseDestination($destStr);
			next if ! $dest;
			logger("INFO", "Added destination $dest with default options.");
			$this->_initDestination($prevDest, $dest);
		}
		$this->writeDestinations();
	}elsif(ref($destLocations[0]) eq "HASH"){
		# user gave a reference to a hash, each key being a destination (host[:port][ pass])
		# we will send datagrams to all valid destinations (i.e. host can be resolved), overwritting the
		# default options with the ones passed by user. Options will be named as in the %defaultOptions.
		$this->{CONF_RECHECK} = 0;
		for my $destStr (keys %{$destLocations[0]}){
			my $dest = $this->parseDestination($destStr);
			next if ! $dest;
			$this->_initDestination($prevDest, $dest);
			logger("INFO", "Added destination $dest with the following additional options:");
			# now we have to modify default options with the ones given by user
			for my $key (keys %{$destLocations[0]->{$destStr}}){
				my $value = $destLocations[0]->{$destStr}->{$key};
				logger("INFO", " -> $key = $value");
				$this->{DESTINATIONS}->{$dest}->{OPTS}->{$key} = $value;
			}
		}
		$this->writeDestinations();
	}else{
		# we got a list of URLs and/or files. Fetch them and get the configuration.
		# Use an array ref (the former "= ()" left a plain undef here).
		$this->{DEST_LOCATIONS} = [];
		push(@{$this->{DEST_LOCATIONS}}, @destLocations);
		$this->refreshConfig();
	}
}

# Register $dest with a fresh copy of the default options, carry over the
# last-sent timestamps and previous raw data from the old configuration,
# and disable periodic config rechecking for it. (Shared by the ARRAY and
# HASH branches of setDestinations above.)
sub _initDestination {
	my ($this, $prevDest, $dest) = @_;
	my %defOptsCopy = %defaultOptions;
	$this->{DESTINATIONS}->{$dest}->{OPTS} = \%defOptsCopy;
	ApMon::Common::updateLastSentTime($prevDest->{$dest}->{OPTS}, $this->{DESTINATIONS}->{$dest}->{OPTS});
	$this->{DESTINATIONS}->{$dest}->{PREV_RAW_DATA} =
		($prevDest->{$dest}->{PREV_RAW_DATA} ? $prevDest->{$dest}->{PREV_RAW_DATA} : {});
	$this->{DESTINATIONS}->{$dest}->{OPTS}->{'conf_recheck'} = 0;
}
# This will fetch all the configuration files and then, if this part was succesful, it
# will call parseConfig to build the temporary configuration file from which both Main
# and BgMonitor will read the destinations.
# Returns the number of config lines applied, or the previous value on failure.
sub refreshConfig {
	my ($this, $prevSuccess) = @_;
	$prevSuccess = 0 unless $prevSuccess;
	my $locations = $this->{DEST_LOCATIONS};
	if((! $locations) || (! @$locations)){
		logger("NOTICE", "No configuration file was given.");
		return $prevSuccess;
	}
	logger("DEBUG", "Refreshing config from pid $$");
	my ($error, $linesRef) = $this->fetchConfig(@$locations);
	if($error){
		logger("WARNING", "Failed reading destination files/urls. Configuration will remain unchanged.");
		return $prevSuccess;
	}
	# All sources were readable: apply the new configuration and remember
	# how many lines it had.
	$this->parseConfig($linesRef);
	return scalar(@$linesRef);
}
# fetch the configuration form all given files/URLs. It returns a pair ($error, $linesRef) where
# $error contains the number of locations from where the retrieval of the configuration failed.
# $linesRef is a reference to an array containing all the lines.
# NOTE(review): "my @newlines = ;" below has lost its readline operator
# (probably "<INFILE>") in extraction - confirm against upstream. Each source
# is terminated with an END_PART marker consumed later by parseConfig.
sub fetchConfig {
my ($this, @dests) = @_;
my @lines = ();
my $error = 1;
for my $dest (@dests){
if ( $dest =~ /^http:\/\// ) {
logger("INFO", "Reading config from url: $dest");
# LWP is loaded lazily so file-only setups don't need it installed.
require LWP::UserAgent;
my $ua = LWP::UserAgent->new();
$ua->timeout(5);
$ua->env_proxy();
my $response = $ua->get($dest);
if($response->is_success){
push(@lines, split("\n", $response->content . "\nEND_PART\n"));
$error = 0;
}else{
logger("WARNING", "Error reading url: $dest");
logger("WARNING", "Got: ".$response->status_line);
}
}else{
logger("INFO", "Reading config from file: $dest");
if(open(INFILE, "<$dest")){
my @newlines = ;
push(@lines, @newlines);
close(INFILE);
push(@lines, split("\n", "\nEND_PART\n"));
$error = 0;
}else{
logger("WARNING", "Error reading file: $dest");
}
}
}
return ($error, \@lines);
}
# This will parse the config lines brought by fetchConfig, creating the local temporary config file.
# Destinations accumulate until an END_PART marker, at which point each one is
# stored together with a copy of the options collected so far.
sub parseConfig {
	my ($this, $linesRef) = @_;
	my @confLines = @$linesRef; # work on a copy: chomp must not touch the caller's array
	my $oldDests = $this->{DESTINATIONS};
	$this->{DESTINATIONS} = {};
	my (@pendingDests, %crtOpts);
	for my $confLine (@confLines) {
		chomp $confLine;
		next if $confLine =~ /^\s*$/;   # skip empty lines
		next if $confLine =~ /^\s*#/;   # skip comments
		# normalise whitespace: collapse runs, trim both ends
		$confLine =~ s/\s+/ /g;
		$confLine =~ s/^ //;
		$confLine =~ s/ $//;
		if($confLine =~ /^xApMon_(.*)/){
			# an "xApMon_name = value" option for the current destinations
			my $assignment = $1;
			next unless $assignment =~ /(\S+)\s?=\s?(\S+)/;
			my ($name, $value) = ($1, $2);
			$value = 0 if $value =~ /off/i;
			$value = 1 if $value =~ /on/i;
			$crtOpts{$name} = $value;
			# a few option names act globally on this object
			$this->setLogLevel($value) if $name eq "loglevel";
			$this->setMaxMsgRate($value) if $name eq "maxMsgRate";
			$this->{CONF_RECHECK} = $value if $name eq "conf_recheck";
			$this->{CONF_CHECK_INTERVAL} = $value if $name eq "recheck_interval";
		}elsif($confLine =~ /END_PART/){
			# flush the accumulated destinations with a copy of the options
			for my $dest (@pendingDests){
				my %optsCopy = %crtOpts;
				$this->{DESTINATIONS}->{$dest}->{OPTS} = \%optsCopy;
				ApMon::Common::updateLastSentTime($oldDests->{$dest}->{OPTS}, $this->{DESTINATIONS}->{$dest}->{OPTS});
				$this->{DESTINATIONS}->{$dest}->{PREV_RAW_DATA} =
					($oldDests->{$dest}->{PREV_RAW_DATA} ? $oldDests->{$dest}->{PREV_RAW_DATA} : {});
			}
			@pendingDests = ();
			%crtOpts = ();
		}else{
			# anything else should be a destination specification
			my $dest = $this->parseDestination($confLine);
			push(@pendingDests, $dest) if $dest;
		}
	}
	$this->writeDestinations();
}
# Write the destinations to the temporary config file, to be able to get them also from the
# other processes. The data is written to "<conf>.tmp" first and then rename()d
# over the real file, so readers never observe a half-written configuration.
sub writeDestinations {
	my $this = shift;
	if(open(CONF, ">".$this->{CONF_FILE}.".tmp")) {
		logger("DEBUG", "Writing config to $this->{CONF_FILE}");
		my ($dest, $opt, $val);
		for $dest (keys %{$this->{DESTINATIONS}}) {
			print CONF "$dest\n";
			# options are written indented by one space, as updateConfig expects
			for $opt (keys %{$this->{DESTINATIONS}->{$dest}->{OPTS}}) {
				$val = $this->{DESTINATIONS}->{$dest}->{OPTS}->{$opt};
				print CONF " $opt=$val\n";
			}
		}
		close(CONF);
		chmod(0600, $this->{CONF_FILE}.'.tmp');
		# this is done in order to keep the interference between the processes as small as possible
		rename($this->{CONF_FILE}.'.tmp', $this->{CONF_FILE});
	}else{
		logger("ERROR", "Cannot write destinations to file $this->{CONF_FILE}");
	}
}
# Given a destination line (i.e. host[:port][ passwd]), it returns at most one
# string of the following form: "ip:port:passwd".
# This is what will be used as a destination in sending the directSendParameters.
# Returns "" if the line does not parse or the host cannot be resolved.
sub parseDestination {
	my ($this, $line) = @_;
	my $dest = "";
	return $dest unless $line =~ /([\.\-a-zA-Z0-9]+)\s*:?\s*(\d+)?\s*(.*)?/;
	my ($host, $port, $pass) = ($1, $2, $3);
	$port = $APMON_DEFAULT_PORT if ! defined($port) || $port eq "";
	# resolve the host; only IPv4 (4-byte) addresses are accepted here
	my ($name,$aliases,$type,$len,$addr) = gethostbyname($host);
	if(defined($len) and $len == 4){
		my $ip = inet_ntoa($addr);
		logger("DEBUG", "found destination i=$ip, P=$port, p=$pass");
		$dest = "$ip:$port:$pass";
	}else{
		logger("WARNING", "Error resolving host $host");
	}
	return $dest;
}
# This will parse the options sent by functions in ApMon.
# The message is a newline-separated list of "key:value" commands; recognised
# keys update this object, "dest:" lines are collected and applied at the end.
sub parseParentMessage {
	my ($this, $fullMsg) = @_;
	logger("DEBUG", "Reading messages from user");
	my @dests;
	for my $part (split(/\n/, $fullMsg)){
		$this->setLogLevel($1) if $part =~ /loglevel:(.*)/;
		$this->setMaxMsgRate($1) if $part =~ /maxMsgRate:(.*)/;
		$this->{CONF_RECHECK} = $1 if $part =~ /conf_recheck:(.*)/;
		$this->{CONF_CHECK_INTERVAL} = $1 if $part =~ /recheck_interval:(.*)/;
		push(@dests, $1) if $part =~ /dest:(.*)/;
	}
	$this->setDestinations(@dests) if @dests;
}
# Sets the log level for CONFIG_LOADER (delegates to ApMon::Common).
sub setLogLevel {
	my ($this, $newLevel) = @_;
	return ApMon::Common::setLogLevel($newLevel);
}
# Sets the maximum rate for the messages sent by user (delegates to ApMon::Common).
sub setMaxMsgRate {
	my ($this, $newRate) = @_;
	return ApMon::Common::setMaxMsgRate($newRate);
}
1;
================================================
FILE: ApMon/perl/ApMon/ApMon/ProcInfo.pm
================================================
package ApMon::ProcInfo;
use strict;
use warnings;
use ApMon::Common qw(logger);
use Data::Dumper;
use Net::Domain;
use Time::Local;
use Config;
# See the end of this file for a set of interesting methods for other modules.
# ProcInfo constructor
sub new {
	my $this = {
		DATA => {},               # monitored data that is going to be reported
		JOBS => {},               # jobs that will be monitored
		NETWORKINTERFACES => {},  # network interface names
		# names of the months for ps start time of a process
		MONTHS => ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'],
		readGI => 0,              # used to read generic information less often
	};
	bless $this;
	return $this;
}
# this has to be run twice (with the $lastUpdateTime updated) to get some useful results
# Fills DATA with raw_cpu_*, raw_blocks_*, raw_swap_*, raw_interrupts and
# raw_context_switches counters (cumulative; callers diff two snapshots).
# NOTE(review): several readline operators and the /proc/stat open() appear
# truncated below (lost "<VMSTAT>"/"<IOSTAT>"/"<STAT>" etc. in extraction);
# confirm against the upstream ApMon sources.
sub readStat {
my $this = shift;
# Solaris: parse "vmstat -s" and "iostat -xnI" instead of /proc.
if ($Config{osname} eq "solaris"){
if (open(VMSTAT, "vmstat -s |")){
my $line;
while ($line = ){
if ($line =~ /(\d+)\s+user\s+cpu/ ){
$this->{DATA}->{"raw_cpu_usr"} = $1;
}
if ($line =~ /(\d+)\s+system\s+cpu/){
$this->{DATA}->{"raw_cpu_sys"} = $1;
}
if ($line =~ /(\d+)\s+idle\s+cpu/){
$this->{DATA}->{"raw_cpu_idle"} = $1;
}
if ($line =~ /(\d+)\s+wait\s+cpu/){
$this->{DATA}->{"raw_cpu_iowait"} = $1;
}
if ($line =~ /(\d+)\s+pages\s+swapped\s+in/){
$this->{DATA}->{"raw_swap_in"} = $1;
}
if ($line =~ /(\d+)\s+pages\s+swapped\s+out/){
$this->{DATA}->{"raw_swap_out"} = $1;
}
if ($line =~ /(\d+)\s+device\s+interrupts/){
$this->{DATA}->{"raw_interrupts"} = $1;
}
if ($line =~ /(\d+)\s+cpu\s+context\s+switches/){
$this->{DATA}->{"raw_context_switches"} = $1;
}
}
close VMSTAT;
}
# sum block I/O over all devices reported by iostat (2 header lines skipped)
$this->{DATA}->{"raw_blocks_in"} = $this->{DATA}->{"raw_blocks_out"} = 0;
if (open(IOSTAT, "iostat -xnI |")){
my $line = ;
$line = ;
my $kbR = 0;
my $kbW = 0;
while ($line = ){
(undef, $kbR, $kbW) = split(/\s+/, $line);
$this->{DATA}->{"raw_blocks_in"} += $kbR;
$this->{DATA}->{"raw_blocks_out"} += $kbW;
}
close IOSTAT;
}
return;
}
# Linux: parse /proc/stat (the "cpu" aggregate line plus counters).
if(open(STAT, "){
if($line =~ /^cpu\s/) {
(undef, $this->{DATA}->{"raw_cpu_usr"}, $this->{DATA}->{"raw_cpu_nice"},
$this->{DATA}->{"raw_cpu_sys"}, $this->{DATA}->{"raw_cpu_idle"},
$this->{DATA}->{"raw_cpu_iowait"}, $this->{DATA}->{"raw_cpu_irq"},
$this->{DATA}->{"raw_cpu_softirq"}, $this->{DATA}->{"raw_cpu_steal"},
$this->{DATA}->{"raw_cpu_guest"}
) = split(/ +/, $line);
}
if($line =~ /^page/) { # this doesn't work for kernel >2.5
(undef, $this->{DATA}->{"raw_blocks_in"}, $this->{DATA}->{"raw_blocks_out"}) = split(/ +/, $line);
}
if($line =~/^swap/) { # this also doesn't work in >2.5
(undef, $this->{DATA}->{"raw_swap_in"}, $this->{DATA}->{"raw_swap_out"}) = split(/ +/, $line);
}
$this->{DATA}->{"raw_interrupts"} = $1 if($line =~ /^intr\s(\d+)/);
$this->{DATA}->{"raw_context_switches"} = $1 if($line =~ /^ctxt\s(\d+)/);
}
close STAT;
}else{
logger("NOTICE", "ProcInfo: cannot open /proc/stat");
#disable ..?
}
# blocks_in/out and swap_in/out are moved to /proc/vmstat in >2.5 kernels
if(-r "/proc/vmstat"){
if(open(VMSTAT, "){
$this->{DATA}->{"raw_blocks_in"} = $1 if($line =~ /^pgpgin\s(\d+)/);
$this->{DATA}->{"raw_blocks_out"}= $1 if($line =~ /^pgpgout\s(\d+)/);
$this->{DATA}->{"raw_swap_in"} = $1 if($line =~ /^pswpin\s(\d+)/);
$this->{DATA}->{"raw_swap_out"} = $1 if($line =~ /^pswpout\s(\d+)/);
}
close VMSTAT;
}else{
logger("NOTICE", "Procinfo: cannot open /proc/vmstat");
}
}
}
# sizes are reported in MB (except _usage that is in percent).
# Fills DATA with total_mem, mem_free, mem_used, mem_usage, total_swap,
# swap_free, swap_used, swap_usage (and on Linux mem_buffers/mem_cached/
# mem_actualfree).
# NOTE(review): the /proc/meminfo open() and several "$line = ;" readlines
# below look truncated (lost "<MEM_INFO>" in extraction); confirm upstream.
sub readMemInfo {
my $this = shift;
# Solaris: total memory from prtconf, free memory from vmstat, swap from "swap -l".
if ($Config{osname} eq "solaris"){
if (open(MEM_INFO, "prtconf |")){
my $line;
while ($line = ){
if ($line =~ /^Memory size: (\d+) (\w)/){
$this->{DATA}->{"total_mem"} = $1;
if ($2 eq "G"){
$this->{DATA}->{"total_mem"} *= 1024;
}
}
}
close MEM_INFO;
}
if (open(MEM_INFO, "vmstat |")){
my $line;
# first header line
$line = ;
# second header line
$line = ;
# and the contents
$line = ;
# 5th numeric column of vmstat is free memory in KB -> convert to MB
if ($line =~ /^\s*\d+\s+\d+\s+\d+\s+\d+\s+(\d+)/){
my $memfree = $1 / 1024;
$this->{DATA}->{"mem_free"} = $memfree;
$this->{DATA}->{"mem_actual_free"} = $memfree;
$this->{DATA}->{"mem_used"} = $this->{DATA}->{"total_mem"} - $memfree;
$this->{DATA}->{"mem_usage"} = $this->{DATA}->{"mem_used"} * 100 / $this->{DATA}->{"total_mem"} if $this->{DATA}->{"total_mem"};
}
close MEM_INFO;
}
if (open(MEM_INFO, "swap -l |")){
my $line;
$line = ;
$this->{DATA}->{"total_swap"} = 0;
$this->{DATA}->{"swap_free"} = 0;
# "swap -l" reports 512-byte blocks: /2048 converts blocks -> MB
while ($line = ){
if ($line =~ /(\d+)\s+(\d+)$/){
$this->{DATA}->{"total_swap"} += $1 / 2048;
$this->{DATA}->{"swap_free"} += $2 / 2048;
}
}
$this->{DATA}->{"swap_used"} = $this->{DATA}->{"total_swap"} - $this->{DATA}->{"swap_free"};
$this->{DATA}->{"swap_usage"} = 100.0 * $this->{DATA}->{"swap_used"} / $this->{DATA}->{"total_swap"} if $this->{DATA}->{"total_swap"};
}
return;
}
# Linux: parse /proc/meminfo (values are in KB -> converted to MB).
if(open(MEM_INFO, "){
if($line =~ /^MemFree:/){
my (undef, $mem_free) = split(/ +/, $line);
$this->{DATA}->{"mem_free"} = $mem_free / 1024.0;
}
if($line =~ /^MemTotal:/){
my (undef, $mem_total) = split(/ +/, $line);
$this->{DATA}->{"total_mem"} = $mem_total / 1024.0;
}
if($line =~ /^SwapFree:/){
my (undef, $swap_free) = split(/ +/, $line);
$this->{DATA}->{"swap_free"} = $swap_free / 1024.0;
}
if($line =~ /^SwapTotal:/){
my (undef, $swap_total) = split(/ +/, $line);
$this->{DATA}->{"total_swap"} = $swap_total / 1024.0;
}
if($line =~ /^Buffers:/){
my (undef, $buffers) = split(/ +/, $line);
$this->{DATA}->{"mem_buffers"} = $buffers / 1024.0;
}
if($line =~ /^Cached:/){
my (undef, $cached) = split(/ +/, $line);
$this->{DATA}->{"mem_cached"} = $cached / 1024.0;
}
}
close MEM_INFO;
# derive used/actual-free/usage values; guarded so a missing field
# doesn't produce nonsense (0/undef propagates as "not set")
$this->{DATA}->{"mem_actualfree"} = $this->{DATA}->{"mem_free"} + $this->{DATA}->{"mem_buffers"} + $this->{DATA}->{"mem_cached"}
if ($this->{DATA}->{"mem_free"} && $this->{DATA}->{"mem_buffers"} && $this->{DATA}->{"mem_cached"});
$this->{DATA}->{"mem_used"} = $this->{DATA}->{"total_mem"} - $this->{DATA}->{"mem_actualfree"}
if ($this->{DATA}->{"total_mem"} && $this->{DATA}->{"mem_actualfree"});
$this->{DATA}->{"swap_used"} = $this->{DATA}->{"total_swap"} - $this->{DATA}->{"swap_free"} if $this->{DATA}->{"total_swap"};
$this->{DATA}->{"mem_usage"} = 100.0 * $this->{DATA}->{"mem_used"} / $this->{DATA}->{"total_mem"}
if ($this->{DATA}->{"total_mem"} && $this->{DATA}->{"mem_used"});
$this->{DATA}->{"swap_usage"} = 100.0 * $this->{DATA}->{"swap_used"} / $this->{DATA}->{"total_swap"} if $this->{DATA}->{"total_swap"};
}else{
logger("NOTICE", "ProcInfo: cannot open /proc/meminfo");
}
}
# read the number of processes currently running on the system
# count also the number of runnable, sleeping, zombie, io blocked and traced processes
# works on Darwin
# NOTE(review): "my $state = ;" and the bare "while(){" below look truncated
# (lost "<PROC>" readlines in extraction); confirm against upstream.
sub countProcesses {
my $this = shift;
my $total = 0;
# pre-seed the standard ps state letters so they are reported even when 0
my %states = ('D' => 0, 'R' => 0, 'S' => 0, 'T' => 0, 'Z' => 0);
my $command = "ps -A -o state |";
if ($Config{osname} eq "solaris"){
$command = "ps -A -o s |";
}
if(open(PROC, $command)){
my $state = ; # ignore the first line - it's the header
while(){
# only the first letter of the state field is significant
$state = substr($_, 0, 1);
$states{$state}++;
$total++;
}
close PROC;
$this->{DATA}->{"processes"} = $total;
for $state (keys %states){
# skip empty/whitespace state keys that can come from odd ps output
next if (($state eq '') || ($state =~ /\s+/));
$this->{DATA}->{"processes_$state"} = $states{$state};
}
}
else{
logger("NOTICE", "ProcInfo: cannot count the processes using ps.");
}
}
#Read information about CPU.
# Fills DATA with no_CPUs, cpu_MHz, cpu_vendor_id, cpu_family, cpu_model,
# cpu_model_name, bogomips, cpu_cache and (if known) ksi2k_factor.
# NOTE(review): the /proc/cpuinfo open()/while() below look truncated (lost
# "<CPU_INFO>"/"<CACHE_INFO>" in extraction) and $no_cpus/$line/$level3params
# are presumably declared in the lost text; confirm against upstream.
sub readCPUInfo {
my $this = shift;
# Solaris: just count physical processors.
if ($Config{osname} eq "solaris"){
chomp ($this->{DATA}->{"no_CPUs"} = `psrinfo -p`);
return;
}
if(-r "/proc/cpuinfo"){
if(open(CPU_INFO, "){
# one "cpu MHz" line per logical CPU -> used to count CPUs too
if($line =~ /cpu MHz\s+:\s+(\d+\.?\d*)/){
$this->{DATA}->{"cpu_MHz"} = $1;
$no_cpus ++;
}
if($line =~ /vendor_id\s+:\s+(.+)/ || $line =~ /vendor\s+:\s+(.+)/){
$this->{DATA}->{"cpu_vendor_id"} = $1;
}
if($line =~ /cpu family\s+:\s+(.+)/ || $line =~ /revision\s+:\s+(.+)/){
$this->{DATA}->{"cpu_family"} = $1;
}
if($line =~ /model\s+:\s+(.+)/) {
$this->{DATA}->{"cpu_model"} = $1;
}
if($line =~ /model name\s+:\s+(.+)/ || $line =~ /family\s+:\s+(.+)/){
$this->{DATA}->{"cpu_model_name"} = $1;
}
if($line =~ /bogomips\s+:\s+(\d+\.?\d*)/ || $line =~ /BogoMIPS\s+:\s+(\d+\.?\d*)/){
$this->{DATA}->{"bogomips"} = $1;
}
if($line =~ /cache size\s+:\s+(\d+)/){
$this->{DATA}->{"cpu_cache"} = $1;
}
}
close CPU_INFO;
$this->{DATA}->{"no_CPUs"} = $no_cpus;
}
}
# this is for Itanium
if(-r "/proc/pal/cpu0/cache_info"){
if(open(CACHE_INFO, "){
$level3params = 1 if ($line =~ /Cache level 3/);
$this->{DATA}->{"cpu_cache"} = $1 / 1024 if ($level3params && $line =~ /Size\s+:\s+(\d+)/);
}
close(CACHE_INFO);
}
}
# also put the ksi2k factor, if known
$this->{DATA}->{"ksi2k_factor"} = $ApMon::Common::KSI2K if $ApMon::Common::KSI2K;
}
# reads the IP, hostname, cpu_MHz, kernel_version, os_version, platform
# Fills DATA with hostname, per-interface <eth>_ip/<eth>_ipv6, kernel_version,
# platform and os_type.
# NOTE(review): several "while ($line = ){" / "my $line = ;" / "$osType = ;"
# readlines below look truncated (lost "<IF_CFG>"/"<LSB_RELEASE>"/"<VERF>" in
# extraction); confirm against upstream.
sub readGenericInfo {
my $this = shift;
my $hostname = Net::Domain::hostfqdn();
$this->{DATA}->{"hostname"} = $hostname;
# Solaris branch: uname/ifconfig-based, with a fake eth0 for compatibility.
if ($Config{osname} eq "solaris"){
chomp ($this->{DATA}->{"os_type"} = `uname -sr`);
$this->{DATA}->{"platform"} = "solaris";
$this->{DATA}->{"kernel_version"} = $Config{osvers};
if (open(IF_CFG, "ifconfig -a4 |")){
my ($eth, $ip, $line);
while ($line = ){
if ($line =~ /^(\w+\d):/){
$eth = $1;
}
if (defined($eth) and ($line =~ /\s+inet\s+(\d+\.\d+\.\d+\.\d+)/)){
$ip = $1;
next if ($eth =~ /^lo/);
$this->{DATA}->{$eth."_ip"} = $ip;
# fake eth0 on solaris
$this->{DATA}->{"eth0_ip"} = $ip unless $this->{DATA}->{"eth0_ip"};
}
}
}
return;
}
# Linux: walk "ifconfig -a" output; only interfaces previously recorded in
# NETWORKINTERFACES (i.e. physical, non-virtual ones) are reported.
if(open(IF_CFG, "/sbin/ifconfig -a |")){
my ($eth, $ip, $ipv6, $line);
while($line = ){
# a line starting with a word is a new interface header
if($line =~ /^(\w+):?\s+/ ){
undef $ip;
if (exists($this->{NETWORKINTERFACES}->{$1})){
$eth = $1;
undef $ip;
undef $ipv6;
}
else{
undef $eth;
}
next;
}
if ($line =~ /^\w/){
undef $eth;
undef $ip;
undef $ipv6;
next;
}
# first IPv4 address of the current interface
if(defined($eth) and ($line =~ /\s+inet( addr:)?\s*(\d+\.\d+\.\d+\.\d+)/) and ! defined($ip)){
$ip = $2;
$this->{DATA}->{$eth."_ip"} = $ip;
undef $ipv6;
}
# first global-scope IPv6 address of the current interface
if(defined($eth) and ($line =~ /\s+inet6( addr:)?\s*([0-9a-fA-F:]+).*(Scope:Global|scopeid.*global)/) and ! defined($ipv6)){
$ipv6 = $2;
$this->{DATA}->{$eth."_ipv6"} = $ipv6;
}
}
close IF_CFG;
}else{
logger("NOTICE", "ProcInfo: couldn't get output from /sbin/ifconfig -a");
}
# determine the kernel version
my $line = `uname -r`;
chomp $line;
$this->{DATA}->{"kernel_version"} = $line;
# determine the platform
$line = `uname -m 2>/dev/null || uname`;
chomp $line;
$this->{DATA}->{"platform"} = $line;
# try to determine the OS type: lsb_release first, then release files, then uname
my $osType = "";
if(open(LSB_RELEASE, 'env PATH=$PATH:/bin:/usr/bin lsb_release -d 2>/dev/null |')){
my $line = ;
$osType = $1 if ($line && $line =~ /Description:\s*(.*)/);
close LSB_RELEASE;
}
if(! $osType){
for my $f ("/etc/redhat-release", "/etc/debian_version", "/etc/SuSE-release",
"/etc/slackware-version", "/etc/gentoo-release", "/etc/mandrake-release",
"/etc/mandriva-release", "/etc/issue"){
if(open(VERF, "$f")){
$osType = ;
chomp $osType;
close VERF;
last;
}
}
}
if(! $osType){
$osType = `uname -s`;
chomp $osType;
}
$this->{DATA}->{"os_type"} = $osType;
}
# read system's uptime and load average. Time is reported as a floating number, in days.
# It uses the 'uptime' command which's output looks like these:
# 19:55:37 up 11 days, 18:57, 1 user, load average: 0.00, 0.00, 0.00
# 18:42:31 up 87 days, 18:10, 9 users, load average: 0.64, 0.84, 0.80
# 6:42pm up 7 days 3:08, 7 users, load average: 0.18, 0.14, 0.10
# 6:42pm up 33 day(s), 1:54, 1 user, load average: 0.01, 0.00, 0.00
# 18:42 up 7 days, 3:45, 2 users, load averages: 1.10 1.11 1.06
# 18:47:41 up 7 days, 4:35, 19 users, load average: 0.66, 0.44, 0.41
# 15:10 up 8 days, 12 mins, 2 users, load averages: 1.46 1.27 1.18
# 11:57am up 2:21, 22 users, load average: 0.59, 0.93, 0.73
sub readUptimeAndLoadAvg {
	my $this = shift;
	my $line = `uptime`;
	chomp $line;
	unless($line =~ /up\s+((\d+)\s+day[ (s),]+)?(\d+)(:(\d+))?[^\d]+(\d+)[^\d]+([\d\.]+)[^\d]+([\d\.]+)[^\d]+([\d\.]+)/){
		logger("NOTICE", "ProcInfo: got unparsable output from uptime: $line");
		return;
	}
	my ($days, $hour, $min, $users, $load1, $load5, $load15) = ($2, $3, $5, $6, $7, $8, $9);
	# When the time-of-day field has no ":", the single number is minutes.
	if(! $min){
		$min = $hour;
		$hour = 0;
	}
	$days = 0 if ! $days;
	# uptime is reported as a fractional number of days
	$this->{DATA}->{"uptime"} = $days + $hour / 24.0 + $min / 1440.0;
	$this->{DATA}->{"logged_users"} = $users; # this is currently not reported!
	$this->{DATA}->{"load1"} = $load1;
	$this->{DATA}->{"load5"} = $load5;
	$this->{DATA}->{"load15"}= $load15;
}
# Sum df(1) sizes over all filesystems matching $APMON_STORAGEPATH (default
# "data") and report eos_disk_space/used/free in TB plus eos_disk_usage in %.
# NOTE(review): "my $all = ;" below has lost its "<IN>" readline operator in
# extraction; confirm against upstream.
sub readEosDiskValues {
my $this = shift;
my $storagepath=$ENV{"APMON_STORAGEPATH"};
if ( "$storagepath" eq "" ) {
$storagepath = "data";
}
# awk accumulates totals per line; tail -1 keeps only the final sums
if (open IN, "df -P -B 1 | grep $storagepath | grep -v Filesystem | awk '{a+=\$2;b+=\$3;c+=\$4;print a,b,c}' | tail -1|") {
my $all = ;
if ($all) {
my @vals = split (" ",$all);
# df reports bytes (-B 1); four /1024 divisions convert to TB
$this->{DATA}->{"eos_disk_space"} = sprintf "%.03f",$vals[0]/1024.0/1024.0/1024.0/1024.0;
$this->{DATA}->{"eos_disk_used"} = sprintf "%.03f",$vals[1]/1024.0/1024.0/1024.0/1024.0;
$this->{DATA}->{"eos_disk_free"} = sprintf "%.03f",$vals[2]/1024.0/1024.0/1024.0/1024.0;
$this->{DATA}->{"eos_disk_usage"} = sprintf "%d",100.0 *$vals[1]/$vals[0];
}
close(IN);
}
}
# Query the installed eos-xrootd and eos-server RPM versions and report them
# as xrootd_rpm_version ("v"-prefixed) and eos_rpm_version.
# NOTE(review): both "my $all = ;" lines below have lost their "<IN>" readline
# operator in extraction; confirm against upstream.
sub readEosRpmValues {
my $this = shift;
if (open IN, "rpm -qa eos-xrootd | cut -d '-' -f3 |") {
my $all = ;
if ($all) {
chomp $all;
$all =~ s/xrootd-//;
$this->{DATA}->{"xrootd_rpm_version"} = 'v'.$all;
}
close(IN);
}
if (open IN, "rpm -qa eos-server |") {
my $all = ;
if ($all) {
chomp $all;
# strip the package-name prefix, keeping only version-release
$all =~ s/eos-server-//;
$this->{DATA}->{"eos_rpm_version"} = $all;
}
close(IN);
}
}
# Print the current call stack (up to 30 frames) to stdout, one
# "<file> line <N> in function <F>" entry per frame. Debugging helper.
sub show_call_stack {
	my $max_depth = 30;
	my $i = 1;
	# caller($i) describes the frame $i levels up; loop until frames run out.
	# (The former unused "my ($path, $line, $subr);" declaration was removed.)
	while ( (my @call_details = (caller($i++))) && ($i<$max_depth) ) {
		print "$call_details[1] line $call_details[2] in function $call_details[3]\n";
	}
}
# do a difference with overflow check and repair
# the counter is unsigned 32 or 64 bit
sub diffWithOverflowCheck {
	my ($this, $newVal, $oldVal) = @_;
	# If the counter wrapped around (new < old), the raw new value is the
	# best estimate of the amount accumulated since the wrap.
	return ($newVal < $oldVal) ? $newVal : $newVal - $oldVal;
}
# read network information like transfered kBps and nr. of errors on each interface
# TODO: find an alternative for MAC OS X
# Populates NETWORKINTERFACES with non-virtual interface names and DATA with
# raw_net_<if>_in/out/errs counters plus raw_net_total_traffic_in/out.
# NOTE(review): the "while ($line = ){" readlines and the /proc/net/dev open()
# below look truncated (lost "<NET_DEV>" in extraction); confirm upstream.
sub readNetworkInfo {
my $this = shift;
$this->{NETWORKINTERFACES} = {};
# Solaris: per-interface byte counters are unavailable, so the global TCP
# byte counters from netstat are attributed to a fake "eth0".
if ($Config{osname} eq "solaris"){
my $ifname;
my $line;
if (open(NET_DEV, "ifconfig -a4 |")){
while ($line = ){
next if ($ifname);
if ($line =~ /^(\w+\d):\s+/){
next if ($line =~ /^lo/);
$ifname = $1;
}
}
close NET_DEV;
}
my $bytesIn = 0;
my $bytesOut = 0;
if (open(NET_DEV,"netstat -P tcp -s |")){
while ($line = ){
if ($line =~ /tcpOut\w+Bytes\s*=\s*(\d+)/){
$bytesOut += $1;
}
if ($line =~ /tcpRetransBytes\s*=\s*(\d+)/){
$bytesOut += $1;
}
if ($line =~ /tcpIn\w+Bytes\s*=\s*(\d+)/){
$bytesIn += $1;
}
}
close NET_DEV;
$this->{DATA}->{"raw_net_".$ifname."_in"} = $bytesIn;
$this->{DATA}->{"raw_net_".$ifname."_out"} = $bytesOut;
$this->{DATA}->{"raw_net_".$ifname."_err"} = 0;
#fake eth0 traffic, even if on Solaris the interfaces have weird names
#and moreover we cannot tell the traffic per each interface...
$this->{DATA}->{"raw_net_eth0_in"} = $bytesIn;
$this->{DATA}->{"raw_net_eth0_out"} = $bytesOut;
$this->{DATA}->{"raw_net_eth0_err"} = 0;
$this->{DATA}->{"raw_net_total_traffic_in"} = $bytesIn;
$this->{DATA}->{"raw_net_total_traffic_out"} = $bytesOut;
$this->{NETWORKINTERFACES}->{"eth0"} = "eth0";
$this->{NETWORKINTERFACES}->{"total_traffic"} = "total_traffic";
}
return;
}
# Linux: keep only physical interfaces (symlinks in /sys/class/net that do
# not point into a ".../virtual/..." device path).
if (opendir my $dh, "/sys/class/net"){
my @things = grep {$_ ne '.' and $_ ne '..' } readdir $dh;
foreach my $thing (@things) {
my $link = readlink("/sys/class/net/".$thing);
if (defined($link) && index($link, "/virtual/")<0){
$this->{NETWORKINTERFACES}->{$thing} = $thing;
}
}
}
my $total_traffic_in=0;
my $total_traffic_out=0;
# parse /proc/net/dev: columns are bytes-in, errs-in, ..., bytes-out, errs-out
if(open(NET_DEV, ") {
if($line =~ /\s*(\w+):\s*(\d+)\s+\d+\s+(\d+)\s+\d+\s+\d+\s+\d+\s+\d+\s+\d+\s+(\d+)\s+\d+\s+(\d+)/){
if ( exists($this->{NETWORKINTERFACES}->{$1}) ){
$this->{DATA}->{"raw_net_$1"."_in"} = $2;
$this->{DATA}->{"raw_net_$1"."_out"} = $4;
$this->{DATA}->{"raw_net_$1"."_errs"} = $3 + $5; # in and out errors
$total_traffic_in += $2;
$total_traffic_out += $4;
}
}
}
close NET_DEV;
}else{
logger("NOTICE", "ProcInfo: cannot open /proc/net/dev");
}
$this->{DATA}->{"raw_net_total_traffic_in" } = $total_traffic_in;
$this->{DATA}->{"raw_net_total_traffic_out"} = $total_traffic_out;
$this->{NETWORKINTERFACES}->{"total_traffic"} = "total_traffic";
}
# run netstat
# Note: this works on MAC OS X
# Run netstat and count sockets per protocol (tcp/udp/unix/...) plus TCP
# sockets per state; results go into DATA as "sockets_*" keys.
# NOTE(review): the <NETSTAT> readline operators were lost in a previous
# import (bare "$line = ;"); restored throughout.
sub readNetStat {
  my $this = shift;
  if(open(NETSTAT, 'env PATH=$PATH:/usr/sbin netstat -an 2>/dev/null |')){
    my %sockets = map { +"sockets_$_" => 0 } ('tcp', 'udp', 'unix'); #icm will be auto added on mac
    my %tcp_details = map { +"sockets_tcp_$_" => 0 }
      ('ESTABLISHED', 'SYN_SENT', 'SYN_RECV', 'FIN_WAIT1',
       'FIN_WAIT2', 'TIME_WAIT', 'CLOSED', 'CLOSE_WAIT',
       'LAST_ACK', 'LISTEN', 'CLOSING', 'UNKNOWN');
    if ($Config{osname} eq "solaris"){
      # Solaris output is sectioned: each "PROTO:" header is followed by
      # column-header lines that must be skipped before the socket lines.
      my $sockclass;
      while (my $line = <NETSTAT>){
        if ($line =~ /^UDP:/){
          $sockclass = "udp";
          $line = <NETSTAT>;
          $line = <NETSTAT>;
          next;
        }
        if ($line =~ /^TCP:/){
          $sockclass = "tcp";
          $line = <NETSTAT>;
          $line = <NETSTAT>;
          next;
        }
        if ($line =~ /^SCTP:/){
          $sockclass = "sctp";
          $line = <NETSTAT>;
          $line = <NETSTAT>;
          next;
        }
        if ($line =~ /^Active UNIX domain sockets/){
          $sockclass = "unix";
          $line = <NETSTAT>;
          next;
        }
        chomp ($line);
        if (length($line) == 0){
          # blank line terminates a section
          undef $sockclass;
          next;
        }
        if (defined($sockclass)){
          if ($sockclass eq "tcp"){
            # last whitespace-separated word on the line is the TCP state
            if ($line =~ /\s+(\w+)\s*$/){
              $sockets{"sockets_tcp"}++;
              my $state = uc($1);
              if (not defined($tcp_details{"sockets_tcp_".$state})){
                $tcp_details{"sockets_tcp_".$state} = 0;
              }
              $tcp_details{"sockets_tcp_".$state}++;
            }
          }
          else{
            if (not defined($sockets{"sockets_$sockclass"})){
              $sockets{"sockets_$sockclass"} = 0;
            }
            $sockets{"sockets_$sockclass"}++;
          }
        }
      }
    }
    else{
      while (my $line = <NETSTAT>) {
        $line =~ s/\s+$//;
        my $proto = ($line =~ /^([^\s]+)/ ? $1 : "");
        my $state = ($line =~ /([^\s]+)$/ ? $1 : "");
        $proto = "unix" if $line =~ /stream/i || $line =~ /dgram/i;
        if($proto =~ /tcp/){
          $sockets{"sockets_tcp"}++;
          $tcp_details{"sockets_tcp_".$state}++;
        }elsif($proto =~ /udp/){
          $sockets{"sockets_udp"}++;
        }elsif($proto =~ /icm/){
          $sockets{"sockets_icm"}++;
        }elsif($proto =~ /unix/){
          $sockets{"sockets_unix"}++;
        }
      }
    }
    close NETSTAT;
    while(my ($key, $value) = each(%sockets)){ $this->{DATA}->{$key} = $value; }
    while(my ($key, $value) = each(%tcp_details)){ $this->{DATA}->{$key} = $value; }
  }
  else{
    logger("NOTICE", "ProcInfo: cannot run netstat");
  }
}
# internal function that gets the full list of children (pids) for a process (pid)
# it returns an empty list if the process has died
# Note: This works on MAC OS X
# internal function that gets the full list of children (pids) for a process
# (pid); returns an empty list if the process has died.
# NOTE(review): the <PIDS> readline operators were lost in a previous import
# ("$_ = ;" / "while(){"); restored here.
sub getChildren {
  my ($this, $parent) = @_;
  my @children = ();
  my %pidmap = ();
  if(open(PIDS, 'ps -A -o "pid ppid" |')){
    $_ = <PIDS>; # skip header
    while(<PIDS>){
      if(/\s*(\d+)\s+(\d+)/){
        $pidmap{$1} = $2;
        push(@children, $parent) if $1 == $parent;
      }
    }
    close(PIDS);
  }else{
    logger("NOTICE", "ProcInfo: cannot execute ps -A -o \"pid ppid\"");
  }
  # breadth-first expansion: for each pid already collected, append all
  # pids whose parent it is (each %pidmap runs to exhaustion, so its
  # iterator is reset for the next outer pass)
  for(my $i = 0; $i < @children; $i++){
    my $prnt = $children[$i];
    while( my ($pid, $ppid) = each %pidmap ){
      if($ppid == $prnt){
        push(@children, $pid);
      }
    }
  }
  return @children;
}
# internal function that parses a time formatted like "days-hours:min:sec" and returns the corresponding
# number of seconds.
# Parse a ps(1) elapsed-time string ("days-hh:mm:ss", "hh:mm:ss" or "mm:ss")
# and return the corresponding number of seconds; 0 if it does not match.
sub parsePSElapsedTime {
  my ($this, $time) = @_;
  return $1 * 24 * 3600 + $2 * 3600 + $3 * 60 + $4 if $time =~ /(\d+)-(\d+):(\d+):(\d+)/;
  return $1 * 3600 + $2 * 60 + $3                  if $time =~ /(\d+):(\d+):(\d+)/;
  return $1 * 60 + $2                              if $time =~ /(\d+):(\d+)/;
  return 0;
}
# internal function that parses a time formatted like "Tue Feb 7 17:13:17 2006" and then returns the
# corresponding number of seconds from EPOCH
# Parse a ps(1) lstart-style time ("Tue Feb 7 17:13:17 2006") and return
# seconds since the Epoch (local time); 0 if the string does not match.
# The month name is resolved to a 0-based index via $this->{MONTHS}.
sub parsePSStartTime {
  my ($this, $strTime) = @_;
  return 0 unless $strTime =~ /\S+\s+(\S+)\s+(\d+)\s+(\d+):(\d+):(\d+)\s+(\d+)/;
  my ($strMonth, $mday, $hour, $min, $sec, $year) = ($1, $2, $3, $4, $5, $6);
  my $mon = 0;
  for my $name (@{$this->{MONTHS}}){
    last if $name eq $strMonth;
    $mon++;
  }
  return timelocal($sec, $min, $hour, $mday, $mon, $year);
}
# read information about this the JOB_PID process
# memory sizes are given in KB
# Note: This works on MAC OS X
# Read information about the JOB_PID process tree: elapsed/cpu time, %cpu,
# %mem, rss/vsz (KB), open files and page faults, aggregated over all
# children and stored in JOBS->{pid}->{DATA}.
# NOTE(review): the <J_STATUS>/<STAT> readline operators were lost in a
# previous import; restored here. close(STAT) was also moved inside the
# if(open(STAT,...)) block so we no longer close a handle that failed to open.
sub readJobInfo {
  my ($this, $pid) = @_;
  return unless $pid;
  my @children = $this->getChildren($pid);
  logger("DEBUG", "ProcInfo: Children for pid=$pid; are @children.");
  if(@children == 0){
    logger("INFO", "ProcInfo: Job with pid=$pid terminated; removing it from monitored jobs.");
    $this->removeJobToMonitor($pid);
    return;
  }
  if(open(J_STATUS, 'ps -A -o "pid lstart time %cpu %mem rsz vsz command" |')){
    my $line = <J_STATUS>; # skip header
    my ($etime, $cputime, $pcpu, $pmem, $rsz, $vsz, $comm, $fd, $minflt, $majflt) = (0, 0, 0, 0, 0, 0, 0, undef, 0, 0);
    my $cputime_offset = $this->{JOBS}->{$pid}->{DATA}->{'cpu_time_offset'} || 0;
    my %mem_cmd_map = (); # this contains all $rsz_$vsz_$command as keys for every pid
    # it is used to avoid adding several times processes that have multiple threads and appear in
    # ps as separate processes, occupying exactly the same amount of memory. The reason for not adding
    # them multiple times is that that memory is shared as they are threads.
    my $crtTime = time();
    while($line = <J_STATUS>){
      chomp $line;
      $line =~ s/\s+/ /g; $line =~ s/^\s+//; $line =~ s/\s+$//;
      # line looks like:
      # "PID STARTED TIME %CPU %MEM RSZ VSZ COMMAND"
      # "6157 Tue Feb 7 22:15:30 2006 00:00:00 0.0 0.0 428 1452 g++ -O -pipe..."
      if($line =~ /(\S+) (\S+ \S+ \S+ \S+ \S+) (\S+) (\S+) (\S+) (\S+) (\S+) (.+)/){
        my($apid, $stime1, $cputime1, $pcpu1, $pmem1, $rsz1, $vsz1, $comm1)
          = ($1, $2, $3, abs($4), abs($5), $6, $7, $8); # % can be negative on mac!?!
        my $isChild = 0;
        for my $childPid (@children){
          if($apid == $childPid){
            $isChild = 1;
            last;
          }
        }
        next if(! $isChild);
        my $sec = $crtTime - $this->parsePSStartTime($stime1);
        $etime = $sec if $sec > $etime; # the elapsed time is the maximum of all elapsed
        $sec = $this->parsePSElapsedTime($cputime1); # times corresponding to all child processes.
        $cputime += $sec; # total cputime is the sum of cputimes for all processes.
        $pcpu += $pcpu1; # total %cpu is the sum of all children %cpu.
        if(! $mem_cmd_map{"$pmem1 $rsz1 $vsz1 $comm1"} ++){
          # it's the first thread/process with this memory footprint; add it.
          $pmem += $pmem1; $rsz += $rsz1; $vsz += $vsz1;
          # the same is true for the number of opened files
          my $thisFD = $this->countOpenFD($apid);
          $fd += $thisFD if (defined $thisFD);
        } # else not adding memory usage.
        # Get the number of minor and major page faults from /proc/<pid>/stat
        if(open(STAT, "/proc/$apid/stat")){
          my $line = <STAT>;
          my($pid, $exec, $status, $ppid, $pgrp, $sid, $tty, $tty_grp, $flags, $mflt, $cmflt, $jflt, $cjflt)
            = split(/\s+/, $line);
          $minflt += $mflt;
          $majflt += $jflt;
          close(STAT);
        }
      }
    }
    close(J_STATUS);
    $cputime += $cputime_offset;
    my $cputime_delta = ($this->{JOBS}->{$pid}->{DATA}->{'cpu_time'} || 0) - $cputime; # note this is the other way around!
    if($cputime_delta > 0){
      # Current time is lower than previous - one of the forked processes finished and
      # its contribution to the cpu_time disappeared.
      # We have to recalculate the cputime_offset. Note that in this case, we lose the
      # cpu_time of the other processes, consumed between these two reports.
      $cputime_offset += $cputime_delta;
      $cputime += $cputime_delta;
    }
    $cputime_delta = $cputime - ($this->{JOBS}->{$pid}->{DATA}->{'cpu_time'} || 0); # real cpu time delta
    my $etime_delta = $etime - ($this->{JOBS}->{$pid}->{DATA}->{'run_time'} || 0); # real elapsed time delta
    my $crtCpuSpeed = $this->{DATA}->{'cpu_MHz'} || 1;
    my $orgCpuSpeed = $ApMon::Common::CpuMHz || $crtCpuSpeed;
    #my $freqFact = $crtCpuSpeed / $orgCpuSpeed; # if Cpu speed varies in time, adjust ksi2k factor
    my $freqFact = 1;
    $this->{JOBS}->{$pid}->{DATA}->{'run_time'} += $etime_delta;
    $this->{JOBS}->{$pid}->{DATA}->{'run_ksi2k'} += $etime_delta * $freqFact * $ApMon::Common::KSI2K if $ApMon::Common::KSI2K;
    $this->{JOBS}->{$pid}->{DATA}->{'cpu_time'} += $cputime_delta;
    $this->{JOBS}->{$pid}->{DATA}->{'cpu_ksi2k'} += $cputime_delta * $freqFact * $ApMon::Common::KSI2K if $ApMon::Common::KSI2K;
    $this->{JOBS}->{$pid}->{DATA}->{'cpu_time_offset'} = $cputime_offset;
    $this->{JOBS}->{$pid}->{DATA}->{'cpu_usage'} = $pcpu;
    $this->{JOBS}->{$pid}->{DATA}->{'mem_usage'} = $pmem;
    $this->{JOBS}->{$pid}->{DATA}->{'rss'} = $rsz;
    $this->{JOBS}->{$pid}->{DATA}->{'virtualmem'} = $vsz;
    $this->{JOBS}->{$pid}->{DATA}->{'open_files'} = $fd if (defined $fd);
    $this->{JOBS}->{$pid}->{DATA}->{'page_faults_min'} = $minflt;
    $this->{JOBS}->{$pid}->{DATA}->{'page_faults_maj'} = $majflt;
  }else{
    logger("NOTICE", "ProcInfo: cannot run ps to see job's status for job $pid");
  }
}
# count the number of open files for the given pid
# TODO: find an equivalent for MAC OS X
# Count the open file descriptors of process $pid via /proc/<pid>/fd.
# Returns undef on Solaris (no such listing used here) or when the fd
# directory cannot be opened.
sub countOpenFD {
  my ($this, $pid) = @_;
  return undef if $Config{osname} eq "solaris";
  if (opendir(DIR, "/proc/$pid/fd")){
    my @entries = readdir(DIR);
    closedir DIR;
    # drop '.', '..' and, for our own process, two extra descriptors
    my $count = ($pid == $$) ? (@entries - 4) : (@entries - 2);
    logger("DEBUG", "Counting open_files for $pid: |@entries| => $count");
    return $count;
  }
  logger("NOTICE", "ProcInfo: cannot count the number of opened files for job $pid");
  return undef;
}
# if there is an work directory defined, then compute the used space in that directory
# and the free disk space on the partition to which that directory belongs
# sizes are given in MB
# Note: this works on MAC OS X
# Compute the disk space used inside the job's work directory (du) and the
# total/used/free space of the partition it lives on (df); sizes in MB.
# NOTE(review): the <DU>/<DF> readline operators were lost in a previous
# import (bare "my $line = ;"); restored here.
sub readJobDiskUsage {
  my ($this, $pid) = @_;
  my $workDir = $this->{JOBS}->{$pid}->{WORKDIR};
  return unless $workDir and -d $workDir;
  if(open(DU, "du -Lsck $workDir | tail -1 | cut -f 1 |")){
    my $line = <DU>;
    if($line){
      chomp $line;
      # du reports KB; store MB
      $this->{JOBS}->{$pid}->{DATA}->{'workdir_size'} = $line / 1024.0;
    }else{
      logger("NOTICE", "ProcInfo: cannot get du output for job $pid");
    }
    close(DU);
  }else{
    logger("NOTICE", "ProcInfo: cannot run du to get job's disk usage for job $pid");
  }
  if(open(DF, "df -k $workDir | tail -1 |")){
    my $line = <DF>;
    if($line){
      chomp $line;
      # fs total/used/free are in KB; usage is the percentage field
      if($line =~ /\S+\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)%/){
        $this->{JOBS}->{$pid}->{DATA}->{'disk_total'} = $1 / 1024.0;
        $this->{JOBS}->{$pid}->{DATA}->{'disk_used'} = $2 / 1024.0;
        $this->{JOBS}->{$pid}->{DATA}->{'disk_free'} = $3 / 1024.0;
        $this->{JOBS}->{$pid}->{DATA}->{'disk_usage'} = $4;
      }
    }else{
      logger("NOTICE", "ProcInfo: cannot get df output for job $pid");
    }
    close(DF);
  }else{
    logger("NOTICE", "ProcInfo: cannot run df to get job's disk usage for job $pid");
  }
}
# create cumulative parameters based on raw params like cpu_, blocks_, swap_, or ethX_
# Derive rate/percentage parameters from the current raw_* counters in
# $dataRef and the previous snapshot in $prevDataRef, then refresh
# $prevDataRef with the current raw values for the next call.
sub computeCummulativeParams {
my ($this, $dataRef, $prevDataRef) = @_;
# first call: no previous snapshot yet, so just seed it and return
if(scalar(keys %$prevDataRef) == 0){
for my $param (keys %$dataRef){
next if $param !~ /^raw_/;
$prevDataRef->{$param} = $dataRef->{$param};
}
$prevDataRef->{'TIME'} = $dataRef->{'TIME'};
return;
}
# cpu -related params
if(defined($dataRef->{'raw_cpu_usr'}) && defined($prevDataRef->{'raw_cpu_usr'})){
my %diff = ();
my $cpu_sum = 0;
# first pass: per-category jiffy deltas and their total
for my $param ('cpu_usr', 'cpu_nice', 'cpu_sys', 'cpu_idle', 'cpu_iowait', 'cpu_irq', 'cpu_softirq', 'cpu_steal', 'cpu_guest') {
if (defined($dataRef->{"raw_$param"}) && defined($prevDataRef->{"raw_$param"})){
$diff{$param} = $this->diffWithOverflowCheck($dataRef->{"raw_$param"}, $prevDataRef->{"raw_$param"});
$cpu_sum += $diff{$param};
}
}
# second pass: convert each delta into a percentage of the total
for my $param ('cpu_usr', 'cpu_nice', 'cpu_sys', 'cpu_idle', 'cpu_iowait', 'cpu_irq', 'cpu_softirq', 'cpu_steal', 'cpu_guest') {
if (defined($dataRef->{"raw_$param"}) && defined($prevDataRef->{"raw_$param"})){
if($cpu_sum != 0){
$dataRef->{$param} = 100.0 * $diff{$param} / $cpu_sum;
}else{
# no elapsed jiffies: a percentage would be meaningless
delete $dataRef->{$param};
}
}
}
if($cpu_sum != 0){
# overall usage = everything that is not idle
$dataRef->{'cpu_usage'} = 100.0 * ($cpu_sum - $diff{'cpu_idle'}) / $cpu_sum;
}else{
delete $dataRef->{'cpu_usage'};
}
# add the other parameters
for my $param ('interrupts', 'context_switches'){
if(defined($dataRef->{"raw_$param"}) && defined($prevDataRef->{"raw_$param"})){
$dataRef->{$param} = $this->diffWithOverflowCheck($dataRef->{"raw_$param"}, $prevDataRef->{"raw_$param"});
}
}
}
# interrupts, context switches, swap & blocks - related params
# (reported as per-second rates, suffix "_R")
my $interval = $dataRef->{TIME} - $prevDataRef->{TIME};
for my $param ('blocks_in', 'blocks_out', 'swap_in', 'swap_out', 'interrupts', 'context_switches') {
if(defined($dataRef->{"raw_$param"}) && defined($prevDataRef->{"raw_$param"}) && ($interval != 0)){
my $diff = $this->diffWithOverflowCheck($dataRef->{"raw_$param"}, $prevDataRef->{"raw_$param"});
$dataRef->{$param."_R"} = $diff / $interval;
}else{
delete $dataRef->{$param."_R"};
}
}
# physical network interfaces - related params
for my $rawParam (keys %$dataRef){
next if $rawParam !~ /^raw_net_/;
next if ! defined($prevDataRef->{$rawParam});
my $param = $1 if($rawParam =~ /raw_net_(.*)/);
if($interval != 0){
$dataRef->{$param} = $this->diffWithOverflowCheck($dataRef->{$rawParam}, $prevDataRef->{$rawParam}); # absolute difference
$dataRef->{$param} = $dataRef->{$param} / $interval / 1024.0 if($param !~ /_errs$/); # if it's _in or _out, compute in KB/sec
}else{
delete $dataRef->{$param};
}
}
# copy the current raw values into the previous-snapshot hash so the next
# call computes deltas against this reading
for my $param (keys %$dataRef){
next if $param !~ /^raw_/;
$prevDataRef->{$param} = $dataRef->{$param};
}
$prevDataRef->{'TIME'} = $dataRef->{'TIME'};
}
# Return the array image of a hash with the requested parameters (from paramsRef)
# sorted alphabetically
# The cummulative parameters are computed based on $prevDataRef
# As a side effect, prevDataRef is updated to have the values in dataRef.
# Return the array image (key, value, key, value, ...) of the parameters
# requested in @$paramsRef, sorted alphabetically by key. When $prevDataRef
# is given, cumulative parameters are computed first (and $prevDataRef is
# updated as a side effect); job data is filtered without that step.
sub getFilteredData {
my ($this, $dataRef, $paramsRef, $prevDataRef, $networkInterfaces) = @_;
# we don't do this for jobs
$this->computeCummulativeParams($dataRef, $prevDataRef) if($prevDataRef);
my %result = ();
for my $param (@$paramsRef) {
# "net_sockets" expands to the per-protocol socket counters
if($param eq "net_sockets"){
for my $key (keys %$dataRef) {
$result{$key} = $dataRef->{$key} if $key =~ /sockets_[^_]+$/;
}
}elsif($param eq "net_tcp_details"){
# per-TCP-state socket counters
for my $key (keys %$dataRef) {
$result{$key} = $dataRef->{$key} if $key =~ /sockets_tcp_/;
}
}elsif($param =~ /^net_(.*)$/ or $param =~ /^(ip)$/){
# per-interface values, restricted to known physical interfaces
my $net_param = $1;
for my $key (keys %$dataRef) {
if ($key =~ /^(\w+)_$net_param/ ){
if ( exists ($networkInterfaces->{$1}) ){
$result{$key} = $dataRef->{$key};
}
}
}
}elsif($param eq "processes"){
for my $key (keys %$dataRef) {
$result{$key} = $dataRef->{$key} if $key =~ /^processes/;
}
}elsif($param =~ /blocks_|swap_|interrupts|context_switches/){
# these are exposed as per-second rates with the "_R" suffix
for my $key (keys %$dataRef) {
$result{$key} = $dataRef->{$key} if $key =~ /^${param}_R$/;
}
$result{$param} = $dataRef->{$param} if($param =~/^swap_/ && defined($dataRef->{$param}));
}
else{
# plain parameter: copy if present
$result{$param} = $dataRef->{$param} if defined $dataRef->{$param};
}
}
# flatten to a sorted (key, value) list
my @sorted_result = ();
for my $key (sort (keys %result)) {
push(@sorted_result, $key, $result{$key});
}
return @sorted_result;
}
######################################################################################
# Interesting functions for other modules:
# This should be called from time to time to update the monitored data,
# but not more often than once a second because of the resolution of time()
# Refresh all monitored data: system-wide statistics plus the per-job
# information for every monitored PID. Should not be called more often
# than once per second (time() resolution).
sub update {
my $this = shift;
logger("NOTICE", "ProcInfo: Collecting backgound and ".keys(%{$this->{JOBS}})." PIDs monitoring info.");
$this->readStat();
$this->readMemInfo();
$this->readUptimeAndLoadAvg();
$this->countProcesses();
$this->readNetworkInfo();
$this->readNetStat();
$this->readEosDiskValues();
$this->readEosRpmValues();
$this->{DATA}->{TIME} = time;
# generic info changes rarely; refresh it only every other update
$this->readGenericInfo() if (($this->{readGI}++) % 2 == 0);
$this->readCPUInfo();
for my $pid (keys %{$this->{JOBS}}) {
$this->readJobInfo($pid);
$this->readJobDiskUsage($pid);
}
}
# Call this to add another PID to be monitored
# Register another PID to be monitored; its work directory is recorded and
# its accumulated data is reset.
sub addJobToMonitor {
  my ($this, $pid, $workDir) = @_;
  my $job = ($this->{JOBS}->{$pid} ||= {});
  $job->{WORKDIR} = $workDir;
  $job->{DATA}    = {};
}
# Call this to stop monitoring a PID
# Stop monitoring the given PID and drop all data collected for it.
sub removeJobToMonitor {
  my $this = shift;
  my $pid = shift;
  delete($this->{JOBS}->{$pid});
}
# Return a filtered hash containing the system-related parameters and values
# Return the filtered (key, value) list of system-related parameters,
# computing cumulative values against $prevDataRef.
sub getSystemData {
  my ($this, $paramsRef, $prevDataRef) = @_;
  return $this->getFilteredData($this->{DATA}, $paramsRef, $prevDataRef,
                                $this->{NETWORKINTERFACES});
}
# Return a filtered hash containing the job-related parameters and values
# Return the filtered (key, value) list of job-related parameters for $pid.
# BUGFIX(review): NETWORKINTERFACES used to be passed as the THIRD argument,
# i.e. in getFilteredData's $prevDataRef slot — so the "not for jobs"
# cumulative-param computation ran anyway, using (and mutating) the
# interfaces hash as if it were previous data. Pass undef for $prevDataRef
# and the interfaces hash in its proper fourth slot.
sub getJobData {
  my ($this, $pid, $paramsRef) = @_;
  return $this->getFilteredData($this->{JOBS}->{$pid}->{DATA}, $paramsRef,
                                undef, $this->{NETWORKINTERFACES});
}
1;
================================================
FILE: ApMon/perl/ApMon/ApMon/XDRUtils.pm
================================================
package ApMon::XDRUtils;
use strict;
use warnings;
my $XDR_STRING = 0;
my $XDR_INT32 = 2;
my $XDR_REAL64 = 5;
my $MAX_INT = 1<<31;
# Encode a set of parameters in the following format:
# |clusterName | nodeName | time | #params |
# | paramName | paramType | paramValue| x #params
# and time, if != -1
# Encode a set of parameters in the XDR layout:
#   | clusterName | nodeName | #params | (name, type, value) x #params | [time] |
# The time field is appended only when $time != -1. A trailing unpaired
# element in @params is ignored.
sub encodeParameters {
  my ($clusterName, $nodeName, $time, @params) = @_;
  my $body = "";
  my @queue = @params;
  while (@queue >= 2) {
    my $pname  = shift @queue;
    my $pvalue = shift @queue;
    $body .= encodeParameter($pname, $pvalue);
  }
  my $tstamp = ($time == -1) ? "" : encodeINT32($time);
  return encodeString($clusterName) . encodeString($nodeName)
       . encodeINT32(scalar(@params) / 2) . $body . $tstamp;
}
# Encode a parameter pair (paramName, paramValue)
# Encode one (paramName, paramValue) pair as XDR: the name string, the
# detected type tag, then the value encoded according to that type.
sub encodeParameter {
  my ($name, $value) = @_;
  my $type = getType($value);
  my $encValue =
      ($type == $XDR_INT32)  ? encodeINT32($value)
    : ($type == $XDR_REAL64) ? encodeREAL64($value)
    :                          encodeString($value);
  return encodeString($name) . encodeINT32($type) . $encValue;
}
# Return the type for a given value (XDR_INT32, XDR_REAL64, XDR_STRING)
# Return the XDR type tag for a given value: XDR_INT32 for integers that fit
# in 32 bits, XDR_REAL64 for other numeric literals, XDR_STRING otherwise.
# BUGFIX(review): the old code did "$_ = shift", clobbering the caller's
# global $_ (e.g. inside a while(<FH>) loop); use a lexical instead.
sub getType {
  my $val = shift;
  return $XDR_INT32 if($val =~ /^[+-]?\d+$/ && (abs($val) < $MAX_INT));
  return $XDR_REAL64 if $val =~ /^([+-]?)(?=\d|\.\d)\d*(\.\d*)?([Ee]([+-]?\d+))?$/;
  return $XDR_STRING;
}
# Encode a string in XDR format
# Encode a string in XDR format: 32-bit big-endian length, then the payload
# NUL-padded up to a multiple of 4 bytes.
sub encodeString {
  my $str = shift;
  my $lenEnc = encodeINT32(length($str));
  my $pad = (4 - length($str) % 4) % 4;
  $str .= "\0" x $pad;
  return $lenEnc . $str;
}
# Encode a 32 bit signed integer in XDR format
# Encode a 32-bit integer in XDR format: truncate to an integer and pack in
# big-endian (network) byte order.
sub encodeINT32 {
  my $value = shift;
  return pack("N", int($value));
}
# Encode a 64 bit double in XDR format
# Encode a 64-bit double in XDR format (big-endian): pack natively, then
# reverse the byte order on little-endian machines.
sub encodeREAL64 {
  my $val = shift;
  my $native = pack("d", $val);
  return (verifyEndian() == 0) ? reverse($native) : $native;
}
# Verify if machine is big-endian or little-endian
# Detect the machine byte order by packing two native shorts:
# returns 0 for little-endian, 1 for big-endian.
# BUGFIX(review): previously fell off the end and returned undef on an
# unrecognized layout, which made encodeREAL64's "== 0" comparison warn;
# now defaults to 1 (big-endian, i.e. no byte swap).
sub verifyEndian {
  my $probe = pack("s2", 1, 2);
  return 0 if $probe eq "\1\0\2\0";   # little-endian
  return 1 if $probe eq "\0\1\0\2";   # big-endian
  return 1;                           # unknown layout: assume no swap needed
}
1;
================================================
FILE: ApMon/perl/ApMon/ApMon.pm
================================================
=head1 NAME
ApMon - Perl extension for sending application information to MonALISA services.
=head1 SYNOPSIS
use ApMon;
# initialize from a URL or from a file
my $apm = new ApMon::ApMon("http://some.host.com/destinations.conf");
$apm->sendParameters("Cluster", "Node", "param1", 14.23e-10, "param2", 234);
# initialize with default xApMon configuration, and send datagrams directly
# to the given host.
my $apm = ApMon::ApMon->new(["pcardaab.cern.ch:8884", "localhost"]);
$apm->sendParameters("Cluster", "Node", {"x" => 12, "y" => 0.3});
# given xApMon configuration will overwrite the default values.
my $apm = ApMon::ApMon->new({
"pcardaab.cern.ch:8884" =>
{"sys_monitoring" => 0, "job_monitoring" => 1, "general_info" => 1},
"lcfg.rogrid.pub.ro passwd" =>
{"sys_monitoring" => 1, "general_info" => 0}
});
$apm->sendParameters("Cluster", "Node", ["name", "some_name", "value", 23]);
=head1 DESCRIPTION
ApMon is an API that can be used by any application to send monitoring
information to MonALISA services (http://monalisa.cacr.caltech.edu). The
monitoring data is sent as UDP datagrams to one or more hosts running MonALISA.
The MonALISA host may require a password enclosed in each datagram, for
authentication purposes. ApMon can also send datagrams that contain monitoring
information regarding the system or the application.
=head1 METHODS
=over
=cut
package ApMon;
use strict;
use warnings;
use ApMon::Common qw(logger);
use ApMon::ConfigLoader;
use ApMon::BgMonitor;
use IO::Handle;
use POSIX ":sys_wait_h";
use Net::Domain;
use Data::Dumper;
# Here it is kept a list of child processes that have to be killed before finishing.
my @children = ();
# Temporary files path
my $TMPDIR = (defined $ENV{'TMPDIR'}) ? $ENV{'TMPDIR'} : '/tmp';
=item $apm = new ApMon(@destLocations);
This is the constructor for the ApMon class. It can be used with several types of
arguments: a list of strings (URLs and/or files) - the configuration will be
read from all; a reference to an ARRAY - each element is a destination ML
service; for each destination the default options will be used; a reference
to a HASH - each key is a destination ML service; for each destination you can
define a set of additional options that will overwrite the default ones. You
can also leave it empty and initialize ApMon later using the
$apm->setDestinations() method. This will create the two background processes
(for bg monitoring and configuration update). If you don't want these two
processes to be created ever, you can pass the value 0 as single argument.
=cut
# ApMon constructor: sets up the temp-file names, default cluster/node,
# the pipes to the (optional) background Config Loader and Background
# Monitor processes, and loads the given destinations, if any.
sub new {
my ($type, @destLocations) = @_;
my $this = {};
bless $this;
$this->{CONF_FILE} = "$TMPDIR/confApMon.$$"; # temporary name used to transfer config data from refreshConfig process to the others
$this->{LAST_VALUES_FILE} = "$TMPDIR/valuesApMon.$$"; #temporary name used to transfer last monitored data from BgMonitor to the main process
$this->{LAST_CONF_CHECK_TIME} = 0; # moment when config was checked last time in sec from Epoch
$this->{CONF_RECHECK} = 1; # do check if configuration has changed
$this->{CONF_CHECK_INTERVAL} = 20; # default interval to check for changes in config files
$this->{DESTINATIONS} = {};
my $hostname = Net::Domain::hostfqdn();
$this->{DEFAULT_CLUSTER} = "ApMon_UserSend";
$this->{DEFAULT_NODE} = $hostname;
# decide if we will ever have bg processes
# (a single argument "0" means: never fork background processes)
if( @destLocations && ref($destLocations[0]) eq "" && $destLocations[0] eq "0" ){
$this->{ALLOW_BG_PROCESSES} = 0;
@destLocations = ();
}else{
$this->{ALLOW_BG_PROCESSES} = 1;
}
pipe($this->{UPD_RDR}, $this->{UPD_WTR}); # open a pipe to send messages to Config Loader
$this->{UPD_WTR}->autoflush(1);
$this->{CONFIG_LOADER} = new ApMon::ConfigLoader($this->{UPD_RDR}, $this->{CONF_FILE});
pipe($this->{BG_RDR}, $this->{BG_WTR}); # open a pipe to send messages to Background Monitor
$this->{BG_WTR}->autoflush(1);
$this->{BG_MONITOR} = new ApMon::BgMonitor($this->{BG_RDR}, $this->{CONF_FILE}, $this->{LAST_VALUES_FILE}, $this->{ALLOW_BG_PROCESSES}, $this->{CONFIG_LOADER});
# if the configuration is given in the constructor, load it now
$this->setDestinations(@destLocations) if @destLocations;
# clean up child processes on INT/TERM (catch_zap is defined elsewhere in this file)
$SIG{INT} = \&catch_zap;
$SIG{TERM} = \&catch_zap;
return $this;
}
=item $apm->setDestinations(@destLocations);
Accept the same parameters as the ApMon constructor
=cut
# Set the destination MonALISA services; accepts the same argument shapes as
# the constructor (list of URLs/files, ARRAY ref, or HASH ref with options).
# Starts the background processes if allowed and not yet running, and tells
# the background Config Loader whether it should keep rechecking the config.
sub setDestinations {
my ($this, @destLocations) = @_;
$this->startBgProcesses();
#logger("INFO", "\$destLocations[0]= .$destLocations[0]. ref = .".ref($destLocations[0]).".");
if((ref($destLocations[0]) eq "ARRAY") or (ref($destLocations[0]) eq "HASH")) {
# prevent background Config Loader to change this
#logger("INFO", "Config is HASH or ARRAY");
ApMon::Common::writeMessage($this->{UPD_WTR}, "conf_recheck:0\n") if @children;
}else{
#logger("INFO", "Config is string = .@destLocations.");
# string destinations (files/URLs): the Config Loader should keep
# rechecking them for changes
my $msg = "conf_recheck:1\n";
for my $dest (@destLocations) {
$msg .= "dest:$dest\n";
}
# send this to background Config Loader for later updates
ApMon::Common::writeMessage($this->{UPD_WTR}, $msg) if @children;
}
# perform the change now, regardless of the existence of background Config Loader
$this->{CONFIG_LOADER}->setDestinations(@destLocations);
$this->enableBgMonitoring(1);
}
=item $apm->addJobToMonitor($pid, $workDir, $clusterName, $nodeName);
Add another job to be monitored. A job is a tree of processes, starting from
the given PID that has files in workDir directory. If workDir in "", no disk
measurements will be performed. All produced parameters will be sent to all
interested destinations using the given cluster and node names.
=cut
# Add a job (process tree rooted at $pid, with files in $workDir) to be
# monitored; results are reported under the given cluster and node names.
sub addJobToMonitor {
  my ($this, $pid, $workDir, $clusterName, $nodeName) = @_;
  # notify the background monitor process, if any is running
  if (@children) {
    my $msg = "pid:$pid\nwork_dir:$workDir\nbg_cluster:$clusterName\nbg_node:$nodeName\n";
    ApMon::Common::writeMessage($this->{BG_WTR}, $msg);
  }
  # also set this to the local copy of the BG_MONITOR in case that user decides to stop background processes
  $this->{BG_MONITOR}->addJobToMonitor($pid, $workDir, $clusterName, $nodeName);
}
=item $apm->removeJobToMonitor($pid);
Remove a tree of processes, starting with PID from being monitored.
=cut
# Remove the process tree rooted at $pid from monitoring.
sub removeJobToMonitor {
  my ($this, $pid) = @_;
  # notify the background monitor process, if any is running
  if (@children) {
    ApMon::Common::writeMessage($this->{BG_WTR}, "rm_pid:$pid\n");
  }
  # also set this to the local copy of the BG_MONITOR in case that user decides to stop background processes
  $this->{BG_MONITOR}->removeJobToMonitor($pid);
}
=item $apm->setMonitorClusterNode($clusterName, $nodeName);
This is used to set the cluster and node name for the system-related monitored
data.
=cut
# Set the cluster and node names used for system-related monitoring data.
sub setMonitorClusterNode {
  my ($this, $clusterName, $nodeName) = @_;
  # notify the background monitor process, if any is running
  if (@children) {
    ApMon::Common::writeMessage($this->{BG_WTR}, "bg_cluster:$clusterName\nbg_node:$nodeName\n");
  }
  # also set this to the local copy of the BG_MONITOR in case that user decides to stop background processes
  $this->{BG_MONITOR}->setMonitorClusterNode($clusterName, $nodeName);
}
=item $apm->setConfRecheck($onOff [, $interval]);
Call this function in order to enable or disable the configuration recheck.
If you enable it, you may want to pass a second parameter, that specifies the
number of seconds between two configuration rechecks. Note that it makes sense
to use configuration recheck only if you get the configuration from (a set of)
files and/or URLs.
=cut
# Enable/disable the periodic configuration recheck; $interval (seconds,
# default 120) is the time between two rechecks.
sub setConfRecheck {
  my ($this, $onOff, $interval) = @_;
  $interval ||= 120;
  $this->{CONF_RECHECK} = $onOff;
  $this->{CONF_CHECK_INTERVAL} = $interval;
  # propagate to the background Config Loader process, if any is running
  if (@children) {
    ApMon::Common::writeMessage($this->{UPD_WTR},
        "conf_recheck:$onOff\nrecheck_interval:$interval\n");
  }
}
=item $apm->sendParams(@params);
Use this to send a set of parameters without specifying a cluster and a node
name. In this case, the default values for cluster and node name will be used.
See the sendParameters function for more details.
=cut
# Send parameters using the default cluster and node names and no explicit
# timestamp (-1 means: let the receiver timestamp the packet).
sub sendParams {
  my $this = shift;
  $this->sendTimedParams(-1, @_);
}
=item $apm->sendParameters($clusterName, $nodeName, @params);
Use this to send a set of parameters to all given destinations.
The default cluster an node names will be updated with the values given here.
If afterwards you want to send more parameters, you can use the shorter version
of this function, sendParams. The parameters to be sent can be either a list, or
a reference to a list. This list should have an even length and should contain
pairs like (paramName, paramValue). paramValue can be a string, an int or a float.
=cut
# Send parameters to all destinations under the given cluster and node names
# without an explicit timestamp (-1).
sub sendParameters {
  my $this = shift;
  my ($clusterName, $nodeName) = (shift, shift);
  $this->sendTimedParameters($clusterName, $nodeName, -1, @_);
}
=item $apm->sendTimedParams($time, @params);
This is the short version of the sendTimedParameters that uses the default
cluster and node name to sent the parameters and allows you to specify a time
(in seconds from Epoch) for each packet.
=cut
# Send parameters with an explicit timestamp (seconds since Epoch), using the
# default cluster and node names.
sub sendTimedParams {
  my $this = shift;
  my $time = shift;
  $this->sendTimedParameters($this->{DEFAULT_CLUSTER}, $this->{DEFAULT_NODE}, $time, @_);
}
=item $apm->sendTimedParameters($clusterName, $nodeName, $time, @params);
Use this instead of sendParameters to set the time for each packet that is sent.
The time is in seconds from Epoch. If you use the other function, the time for
these parameters will be set by the MonALISA service that receives them.
=cut
# Send the given parameter list to every configured destination, stamped
# with $time (seconds since Epoch; -1 leaves timestamping to the receiver).
# Also updates the default cluster/node names used by sendParams.
sub sendTimedParameters {
my ($this, $clusterName, $nodeName, $time, @params) = @_;
# pick up any configuration changes before sending
ApMon::Common::updateConfig($this);
if((! defined($clusterName)) || (! defined($nodeName))){
logger("WARNING", "ClusterName or NodeName are undefined. Not sending given parameters!");
return;
}
$this->{DEFAULT_CLUSTER} = $clusterName;
$this->{DEFAULT_NODE} = $nodeName;
if(scalar (keys %{$this->{DESTINATIONS}})){
for my $dest (keys %{$this->{DESTINATIONS}}){
ApMon::Common::directSendParameters($dest, $clusterName, $nodeName, $time, \@params);
}
}else{
logger("WARNING", "No destinations defined for sending parameters");
}
}
=item $apm->sendBgMonitoring();
Send NOW the background monitoring information to the interested destinations.
Note that this uses the current process and not the background one. So, if you
stop the background processes you can still use this call to send the
background information (both about system and jobs) whenever you want. If $mustSend is != 0,
the bgMonitoring data is sent regardless of when it was last sent. This allows
sending a 'last result', just before the end of a job, and which can happen anytime.
=cut
# Send the background monitoring information NOW, from the current process.
# A true $mustSend forces the send regardless of the last-send time.
sub sendBgMonitoring {
  my ($this, $mustSend) = @_;
  $mustSend ||= 0;
  $this->{BG_MONITOR}->sendBgMonitoring($mustSend);
}
=item $apm->getSysMonInfo('param_name1', 'param_name2', ...);
IF and ONLY IF sendBgMonitoring() was called before, either called by user or by the BgMonitoring process,
the last system monitored values for the requested parameters will be returned. Note that the requested
parameters must be among the monitored ones. If there is no available parameter among the requested ones,
it returns undef.
=cut
# Return the last system-monitoring values for the requested parameter
# names; only meaningful after sendBgMonitoring() has run at least once.
sub getSysMonInfo {
  my ($this, @wanted) = @_;
  $this->update_last_values();
  return $this->filter_params($this->{LAST_VALUES}->{BG_MON_VALUES}, @wanted);
}
=item $apm->getJobMonInfo($pid, 'param_name1', 'param_name2', ...);
IF and ONLY IF sendBgMonitoring() was called before, either called by user or by the BgMonitoring process,
the last job monitored values for the given PID will be returned. Note that the requested parameters
must be among the monitored ones. If there is no available parameter among the requested ones,
it returns undef.
=cut
# Return the last job-monitoring values for the given PID and requested
# parameter names; only meaningful after sendBgMonitoring() has run.
sub getJobMonInfo {
  my ($this, $pid, @wanted) = @_;
  $this->update_last_values();
  return $this->filter_params($this->{LAST_VALUES}->{JOBS}->{$pid}->{BG_MON_VALUES}, @wanted);
}
=item $apm->enableBgMonitoring($onOff);
This allows enabling and disabling of the background monitoring. Note that this
doesn't stop the background monitor process; Note also that this is called by
default by setDestinations () to enable the background monitoring once the
destination is set. It doesn't make sense to call this if you have stopped
the background processes.
=cut
# Enable/disable background monitoring in the background monitor process
# (this does not stop the process itself).
sub enableBgMonitoring {
  my ($this, $onOff) = @_;
  if (@children) {
    ApMon::Common::writeMessage($this->{BG_WTR}, "bg_enable:$onOff\n");
  }
}
=item $apm->refreshConfig();
Call this function to force refreshing the temporary config file and make sure
that at the next send, the new configuration will be used. Note that it makes
sense to use this only if you load the configuration from (a set of) files
and/or URLs. Also note that fetching the configuration files from an URL might
take some time, depending on network conditions.
=cut
# Force a refresh of the temporary config file so the next send uses the
# new configuration.
sub refreshConfig {
  my $this = shift;
  # zero the check time so the config is considered stale immediately
  $this->{LAST_CONF_CHECK_TIME} = 0;
  $this->{CONFIG_LOADER}->refreshConfig();
}
=item $apm->startBgProcesses();
This can be called in order to start the background processes (conf loader
and bg monitor). It is called by default if configuration is read from a
file or from a URL (not when you give a hash or an array for destinations).
=cut
# Start the two background processes (Config Loader and Background Monitor)
# unless they are disallowed or already running. The duplicated fork
# boilerplate is factored into the _spawnChild helper below.
sub startBgProcesses {
  my $this = shift;
  if(! $this->{ALLOW_BG_PROCESSES}){
    logger("DEBUG", "Not starting bg processes since they are not allowed.");
    return;
  }
  if(@children){
    logger("INFO", "Bg processes already started!");
    return;
  }
  logger("INFO", "starting bg processes");
  # start the Config Loader process that retrieves the config periodically
  _spawnChild(sub { $this->{CONFIG_LOADER}->run(); });
  # start the Background Monitoring process
  _spawnChild(sub { $this->{BG_MONITOR}->run(); });
}

# internal helper: fork a child that executes $body->() and exits(0); the
# parent records the child's pid in @children so stopBgProcesses() can
# terminate it later. Dies if fork() fails.
sub _spawnChild {
  my $body = shift;
  my $pid = fork();
  if(! defined $pid){
    logger("FATAL", "cannot fork: $!"); die;
  }
  if($pid == 0) {
    # child
    $body->();
    exit(0);
  }
  # parent
  push(@children, $pid);
}
=item $apm->stopBgProcesses();
This can be called to stop all child processes
=cut
# Stop all background child processes: signal each one (SIGHUP) and reap it,
# then forget the recorded pids.
sub stopBgProcesses {
  my $this = shift;
  foreach my $child (@children) {
    kill 1, $child;        # SIGHUP
    waitpid($child, 0);    # reap to avoid zombies
  }
  @children = ();
}
=item $apm->setLogLevel($level);
This sets the logging level for all ApMon components.
$level can be one of: "DEBUG", "NOTICE", "INFO", "WARNING", "ERROR", "FATAL".
You can also set the log level from the configuration file by specifying
xApMon_loglevel = one of the above (without quotes).
=cut
sub setLogLevel {
	my $this  = shift;
	my $level = shift;
	# Apply the level in this process first, then propagate it — in the
	# same order as before — to the two background workers (through their
	# writer handles) and to the in-process helper objects.
	ApMon::Common::setLogLevel($level);
	if (@children) {
		ApMon::Common::writeMessage($this->{UPD_WTR}, "loglevel:$level\n");
	}
	$this->{CONFIG_LOADER}->setLogLevel($level);
	if (@children) {
		ApMon::Common::writeMessage($this->{BG_WTR}, "loglevel:$level\n");
	}
	$this->{BG_MONITOR}->setLogLevel($level);
}
=item $apm->setMaxMsgRate($rate);
This sets the maximum number of messages that can be sent to a MonALISA service, per second.
By default, it is 50. This is a very large number, and the idea is to prevent errors from
the user. One can easily put in a for loop, without any sleep, some sendParams calls that
can generate a lot of unnecessary network load.
=cut
sub setMaxMsgRate {
	my $this = shift;
	my $rate = shift;
	# Apply the messages-per-second cap locally, then hand it — in the
	# same order as before — to the two background workers (through their
	# writer handles) and to the in-process helper objects.
	ApMon::Common::setMaxMsgRate($rate);
	if (@children) {
		ApMon::Common::writeMessage($this->{UPD_WTR}, "maxMsgRate:$rate\n");
	}
	$this->{CONFIG_LOADER}->setMaxMsgRate($rate);
	if (@children) {
		ApMon::Common::writeMessage($this->{BG_WTR}, "maxMsgRate:$rate\n");
	}
	$this->{BG_MONITOR}->setMaxMsgRate($rate);
}
=item $apm->getCpuType();
This returns a hash with the cpu type: cpu_model_name, cpu_MHz, cpu_cache (in KB). This call
is meant to be used together with setCpuSI2k, to establish a SpecInt performance meter.
If it cannot get the cpu type, it returns undef
=cut
sub getCpuType {
	my ($self) = @_;
	# Ask the common helper for the cpu description (may be undef).
	my $info = ApMon::Common::getCpuType();
	# If the background monitor is running and a CPU frequency was
	# detected, forward the frequency to it as well.
	if (@children && $ApMon::Common::CpuMHz) {
		ApMon::Common::writeMessage($self->{BG_WTR}, "cpu_mhz:$ApMon::Common::CpuMHz\n");
	}
	return $info;
}
=item $apm->setCpuSI2k(si2k);
This sets the SpecINT2000 meter for the current machine. Consequently, jobs will also report
cpu_ksi2k, based on this value and cpu_time.
=cut
sub setCpuSI2k {
	my ($self, $si2k) = @_;
	# Record the SpecINT2000 factor locally, then tell the background
	# monitor about it if the background children are running.
	ApMon::Common::setCpuSI2k($si2k);
	if (@children) {
		ApMon::Common::writeMessage($self->{BG_WTR}, "cpu_si2k:$si2k\n");
	}
}
=item $apm->free();
This function stops the background processes and removes the temporary file. After this
call, the ApMon object must be recreated in order to be used. It is provided for exceptional
cases when you have to recreate the ApMon object over and over again; you have to free it
when you don't need it anymore.
=cut
sub free {
	my ($self) = @_;
	# Stop the background children first, then drop the two per-process
	# temporary files (unlink takes the whole list in one call).
	$self->stopBgProcesses();
	#close(ApMon::Common::SOCKET);
	unlink("$TMPDIR/confApMon.$$", "$TMPDIR/valuesApMon.$$");
}
##################################################################################################
# The following is internal stuff.
# This is called if the user presses CTRL+C or a kill signal is sent to this process
sub catch_zap {
# Signal handler: remove the per-process temporary files and stop the
# background children before exiting, so nothing is left behind.
logger("DEBUG", "Killed! Removing temp files $TMPDIR/{conf,values}ApMon.$$") if defined &logger;
unlink("$TMPDIR/confApMon.$$");
unlink("$TMPDIR/valuesApMon.$$");
# stopBgProcesses is invoked as a plain sub here, so a dummy value stands
# in for the $this argument it normally shifts off.
stopBgProcesses("dummy");
exit(0);
}
# From the given hash, based on the given list of parameters, build a hash with all available values.
# If the resulting list is empty, return undef.
sub filter_params {
	my ($this, $h_src, @wanted) = @_;
	# Treat a missing/false source hash as empty.
	$h_src ||= {};
	# Keep only the requested keys whose values are defined in the source.
	my %picked = map { $_ => $h_src->{$_} }
	             grep { defined $h_src->{$_} } @wanted;
	# An empty selection is reported as undef, not as an empty hashref.
	return %picked ? \%picked : undef;
}
# Update the last bg monitoring values hash with the contents of the LAST_VALUES_FILE.
# Note that this is produced only after sendBgMonitoring was run, either from the main
# process or the BgMonitor process.
sub update_last_values {
	my $this = shift;
	my $now = time;
	# Throttle: keep the cached values if they were read less than 2 seconds ago.
	return if $this->{LAST_VALUES_TIME} && ($now - $this->{LAST_VALUES_TIME} < 2);
	# Three-arg open with a lexical filehandle; the original bareword-handle
	# version had lost its readline in extraction ('my @lines = ;' is a
	# syntax error) — the slurp of the whole file is restored here.
	if (open(my $fh, '<', $this->{LAST_VALUES_FILE})) {
		my @lines = <$fh>;
		close($fh);
		# The file is expected to contain a Data::Dumper-style '$VAR1 = ...'
		# dump; declare $VAR1 so the eval'd string assigns into this scope.
		my $VAR1;
		$this->{LAST_VALUES} = eval join("", @lines);
		logger("ERROR", "Error restoring the last bg monitoring values from file $this->{LAST_VALUES_FILE}:\n$@") if $@;
		$this->{LAST_VALUES_TIME} = $now;
	} else {
		logger("WARNING", "Cannot read the last bg monitoring values from $this->{LAST_VALUES_FILE}");
	}
}
END {
# Process-exit hook: remove the per-process temporary files and make sure
# the background children are stopped even if free() was never called.
unlink("$TMPDIR/confApMon.$$");
unlink("$TMPDIR/valuesApMon.$$");
# Plain sub call, so a dummy value stands in for the $this argument.
stopBgProcesses("dummy");
}
1;
__END__
=back
=head1 AUTHOR
Catalin Cirstoiu
=head1 COPYRIGHT AND LICENSE
This module is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND,
either expressed or implied. This library is free software; you can
redistribute or modify it under the same terms as Perl itself.
=cut
================================================
FILE: ApMon/perl/ApMon/sendToML.sh
================================================
#!/bin/bash
# Sample shell script that sends the given parameters to the ML service
# running on the same machine using ApMon.
#
# 2007-04-03
# Catalin.Cirstoiu@cern.ch

# NOTE(review): the usage here-document and the argument parsing below were
# destroyed in extraction (they had collapsed into a single mangled 'cat <'
# line). They are reconstructed here from the sendParameters($params) call
# at the bottom and from the sibling servMon.sh — confirm against upstream.
if [ $# -lt 2 ] ; then
	cat <<EOF
Usage: $0 <cluster_name> <node_name> [param1:value1 [param2:value2 ...]]
Sends the given parameters to the MonALISA service configured through
MONALISA_HOST (or APMON_CONFIG), using the Perl ApMon library.
EOF
	exit 1
fi

# First two arguments are the cluster and node names; the remaining ones are
# param:value pairs that become the argument list of sendParameters().
cluster=$1 ; shift
node=$1 ; shift
params="'$cluster', '$node'"
for pair in "$@" ; do
	name=${pair%%:*}
	value=${pair#*:}
	params="$params, '$name', '$value'"
done

# Destination examples:
#destination="\"http://monalisa2.cern.ch/~catac/apmon/destinations.conf\""
#destination="['pcardaab.cern.ch:8884']"
#destination="{'pcardaab.cern.ch' => {loglevel => 'NOTICE'}}"
MONALISA_HOST=${MONALISA_HOST:-"localhost"}
APMON_DEBUG_LEVEL=${APMON_DEBUG_LEVEL:-"WARNING"}
destination=${APMON_CONFIG:-"['$MONALISA_HOST']"}
#Finally, run the perl interpreter with a small program that sends all these parameters
exe="use strict;
use warnings;
use ApMon;
my \$apm = new ApMon(0);
\$apm->setLogLevel('$APMON_DEBUG_LEVEL');
\$apm->setDestinations($destination);
\$apm->sendParameters($params);"
#echo "Exe = [$exe]"
export PERL5LIB=`dirname $0`
echo $exe | perl
================================================
FILE: ApMon/perl/ApMon/servMon.sh
================================================
#!/bin/bash
# Sample shell script that provides host (and services) monitoring with ApMon.
#
# 2007-06-07
# Catalin.Cirstoiu@cern.ch
# NOTE(review): the body of usage() — a here-document — plus the option
# parsing that sets $pidfile, $force, $justKill, $hostGroup, $host,
# $srvMonCmds and the enclosing 'if [ -n "$pidfile" ]'-style check were lost
# in extraction; the next two lines are the mangled remainder ('cat <' fused
# with what appears to be 'pid=`cat $pidfile 2>/dev/null`'). Restore from
# the upstream ApMon perl distribution before using this script.
usage(){
cat </dev/null`
lines=`ps -p $pid 2>/dev/null | wc -l`
# 'ps -p' prints a header line plus one line per live pid, so exactly
# 2 lines means the previously recorded ApMon instance is still running.
if [ "$lines" -eq 2 ] ; then
# there is a previous ApMon instance running
if [ -n "$force" ] ; then
echo "Killing previous ApMon instance with pid $pid ..."
# Try a graceful SIGHUP first, escalating to SIGKILL below if needed.
kill -s 1 $pid 2>/dev/null ; sleep 1
lines=`ps -p $pid 2>/dev/null | wc -l`
if [ "$lines" -eq 2 ] ; then
echo "Failed killing ApMon instance with pid $pid! Trying with -9..."
kill -s 9 $pid 2>/dev/null ; sleep 1
lines=`ps -p $pid 2>/dev/null | wc -l`
if [ "$lines" -eq 2 ] ; then
echo "Failed killing -9 ApMon instance with pid $pid!!! Aborting."
exit -1
fi
fi
else
# force flag is not set; just exit
exit 1
fi
fi
fi
# -k mode: only kill a previous instance, don't start a new one.
if [ -n "$justKill" ] ; then
exit 0;
fi
#Set the destination for the monitoring information
#destination="\"http://monalisa2.cern.ch/~catac/apmon/destinations.conf\""
#destination="['pcardaab.cern.ch:8884']"
#destination="{'pcardaab.cern.ch' => {loglevel => 'NOTICE'}}"
MONALISA_HOST=${MONALISA_HOST:-"localhost"}
APMON_DEBUG_LEVEL=${APMON_DEBUG_LEVEL:-"WARNING"}
destination=${APMON_CONFIG:-"['$MONALISA_HOST']"}
#Finally, run the perl interpreter with a small program that sends all these parameters
# The embedded Perl program configures ApMon and then loops forever,
# pushing background-monitoring data every 120 seconds.
exe="use strict;
use warnings;
use ApMon;
my \$apm = new ApMon(0);
\$apm->setLogLevel('$APMON_DEBUG_LEVEL');
\$apm->setDestinations($destination);
\$apm->setMonitorClusterNode('${hostGroup}_Nodes', '$host');$srvMonCmds
while(1){
\$apm->sendBgMonitoring();
sleep(120);
}
"
#echo "Exe = [$exe]"
export PERL5LIB=`dirname $0`
if [ -n "$pidfile" ] ; then
# pid file given; run in background
logfile="`dirname $pidfile`/`basename $pidfile .pid`.log"
echo -e "`date` Starting ApMon in background mode...\nlogfile in: $logfile\npidfile in: $pidfile" | tee $logfile
perl -e "$exe" > $logfile 2>&1 &
pid=$!
echo $pid > $pidfile
else
# pid file not given; run in interactive mode
echo -e "`date` Starting ApMon in interactive mode..."
exec perl -e "$exe"
fi
================================================
FILE: ApMon/run.sh
================================================
#!/bin/sh
# Launcher for the eosapmond ApMon monitoring daemon: sources the EOS
# sysconfig, locates the xrootd process to monitor and runs eosapmond as
# the 'daemon' user, forwarding termination signals to its children.
[ -f /etc/sysconfig/eos ] && . /etc/sysconfig/eos
[ -f /etc/sysconfig/eos_env ] && . /etc/sysconfig/eos_env
cleanup() {
# kill all subprocesses
for pid in $(ps --ppid $$ --forest -o pid --no-headers); do
# '>/dev/null 2>&1' instead of the bash-only '&>': this script runs
# under #!/bin/sh, where 'kill $pid &> /dev/null' would background
# the kill command instead of silencing its output.
kill "$pid" > /dev/null 2>&1
done
exit 0
}
# POSIX trap takes signal names without the SIG prefix; SIGINT/SIGTERM
# only work in bash and would break under a strict /bin/sh.
trap cleanup INT TERM
if [ -z "${MONALISAHOST}" ]; then
echo "error: please configure the MONALISAHOST variable in /etc/sysconfig/eos first!"
exit 1
fi
eosuser=daemon
# Pid of the first xrootd process owned by the eos user; use a placeholder
# pid when none is running so eosapmond still starts.
xrdpid=$(pgrep -u "${eosuser}" xrootd | head -1)
if [ -z "${xrdpid}" ]; then
xrdpid=999999
fi
export PERL5LIB=$(perl -V:installsitearch | cut -d "'" -f 2)/ApMon
runuser -u ${eosuser} -- /opt/eos/apmon/eosapmond ${MONALISAHOST} /var/log/eos/apmon/apmon.log ${APMON_DEBUG_LEVEL:-"WARNING"} ${APMON_INSTANCE_NAME:-"unconfigured"} ${HOSTNAME} ${xrdpid} &
wait
================================================
FILE: ApMon/usr/lib/systemd/system/eosapmond.service
================================================
# systemd service unit file for eosapmond
# Author: Gianmaria Del Monte
# Description: Systemd service to start apmon for monitoring xrootd on EOS
[Unit]
Description=Starts apmon to monitor xrootd on EOS
After=network.target
# Allow at most 3 restart attempts within 5 seconds before giving up.
StartLimitBurst=3
StartLimitIntervalSec=5
[Service]
# run.sh drops privileges itself (runuser -u daemon), so the unit runs as root.
ExecStart=/opt/eos/apmon/run.sh
User=root
Restart=always
[Install]
WantedBy=multi-user.target
================================================
FILE: CMakeLists.txt
================================================
# ----------------------------------------------------------------------
# File: CMakeLists.txt
# Author: Andreas-Joachim Peters - CERN
# ----------------------------------------------------------------------
# ************************************************************************
# * EOS - the CERN Disk Storage System *
# * Copyright (C) 2011 CERN/Switzerland *
# * *
# * This program is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation, either version 3 of the License, or *
# * (at your option) any later version. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program. If not, see <http://www.gnu.org/licenses/>. *
# ************************************************************************
cmake_minimum_required (VERSION 3.16...3.30 FATAL_ERROR)
# Set default build type if not set. This must be done before calling project()
if(NOT CMAKE_BUILD_TYPE AND NOT GENERATOR_IS_MULTI_CONFIG)
if(NOT CMAKE_C_FLAGS AND NOT CMAKE_CXX_FLAGS)
set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING
"Build type: Debug Release RelWithDebInfo MinSizeRel None(use CMAKE_CXX_FLAGS)")
endif()
endif()
project(eos DESCRIPTION "EOS Open Storage" LANGUAGES C CXX ASM)
# Insert cmake/ before everything else in the CMake module path
list(INSERT CMAKE_MODULE_PATH 0 "${PROJECT_SOURCE_DIR}/cmake")
# Use ccache transparently when available; fall back (and remember the
# decision in the cache) when the binary cannot be found.
option(CCACHE "Use ccache for compilation" ON)
if(CCACHE)
find_program(CCACHE_COMMAND ccache ccache-swig)
mark_as_advanced(CCACHE_COMMAND ${CCACHE_COMMAND})
if(EXISTS ${CCACHE_COMMAND})
message(VERBOSE "Found ccache: ${CCACHE_COMMAND}")
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_COMMAND})
else()
message(VERBOSE "Could NOT find ccache")
set(CCACHE OFF CACHE BOOL "Use ccache for compilation (disabled)" FORCE)
endif()
endif()
#-------------------------------------------------------------------------------
# Activate include-what-you-use
#-------------------------------------------------------------------------------
option(ENABLE_IWYU "Enable include-what-you-use tool" OFF)
if(ENABLE_IWYU)
find_program(IWYU_PATH NAMES include-what-you-use iwyu)
if(NOT IWYU_PATH)
message(FATAL_ERROR "Could not find include-what-you-use")
endif()
set(CMAKE_CXX_INCLUDE_WHAT_YOU_USE ${IWYU_PATH})
set(CMAKE_C_INCLUDE_WHAT_YOU_USE ${IWYU_PATH})
endif()
#-------------------------------------------------------------------------------
# Include code coverage module
#-------------------------------------------------------------------------------
option(COVERAGE "Build with test coverage reporting" OFF)
if (COVERAGE)
include(EosCoverage)
endif()
#-------------------------------------------------------------------------------
# Include generic functions and compiler definition parameters
#-------------------------------------------------------------------------------
if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
set(CMAKE_INSTALL_PREFIX "/usr" CACHE PATH "Default install prefix: /usr" FORCE)
endif ()
include(EosUtils)
EOS_CheckOutOfSourceBuild()
EOS_GetUidGid("daemon" "DAEMONUID" "DAEMONGID")
EOS_GetVersion("${VERSION_MAJOR}" "${VERSION_MINOR}" "${VERSION_PATCH}" "${RELEASE}")
include(EosOSDefaults)
if (NOT PACKAGEONLY)
include(EosCompileFlags)
endif()
set(CMAKE_INSTALL_SYSCONFDIR /etc)
include(EosFindLibs)
include(CTest)
#-------------------------------------------------------------------------------
# Make gtest / gmock available for all downstream CMakeLists.txt that need it
#-------------------------------------------------------------------------------
option(USE_SYSTEM_GTEST "Use GoogleTest installed in the system if found" OFF)
if(USE_SYSTEM_GTEST)
find_package(GTest REQUIRED)
else()
add_subdirectory(unit_tests/googletest EXCLUDE_FROM_ALL)
# Add alias libraries to emulate same behavior as external GoogleTest
add_library(GTest::GTest ALIAS gtest)
add_library(GTest::Main ALIAS gtest_main)
endif()
#-------------------------------------------------------------------------------
# Generate documentation
#-------------------------------------------------------------------------------
if (Python3_Interpreter_FOUND AND SPHINX_FOUND)
add_custom_target(doc
COMMAND python3 generate_docs.py
WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}/doc"
COMMENT "Build HTML documentation with Sphinx ...")
endif ()
#-------------------------------------------------------------------------------
# Generate man pages
#-------------------------------------------------------------------------------
if (BUILD_MANPAGES AND HELP2MAN_FOUND)
add_subdirectory(man)
endif()
#-------------------------------------------------------------------------------
# Build the main sub-components (the qclient static library is built under
# namespace/ns_quarkdb); server-only components are skipped for CLIENT builds.
#-------------------------------------------------------------------------------
include_directories(${CMAKE_SOURCE_DIR})
add_subdirectory(common)
add_subdirectory(proto)
add_subdirectory(fst)
add_subdirectory(console)
add_subdirectory(fusex)
add_subdirectory(misc)
add_subdirectory(test)
add_subdirectory(namespace/ns_quarkdb/qclient)
if (NOT CLIENT)
add_subdirectory(client)
add_subdirectory(mgm)
add_subdirectory(namespace)
add_subdirectory(utils)
add_subdirectory(archive)
add_subdirectory(auth_plugin)
add_subdirectory(unit_tests)
add_subdirectory(quarkdb)
endif ()
#-------------------------------------------------------------------------------
# Uninstall target
#-------------------------------------------------------------------------------
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
"${CMAKE_CURRENT_BINARY_DIR}/cmake/cmake_uninstall.cmake"
IMMEDIATE @ONLY)
add_custom_target(
uninstall
"${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake/cmake_uninstall.cmake")
#-------------------------------------------------------------------------------
# Packaging
#-------------------------------------------------------------------------------
set(CPACK_SOURCE_GENERATOR "TGZ")
set(CPACK_PACKAGE_NAME "${CMAKE_PROJECT_NAME}")
set(CPACK_PACKAGE_VERSION "${VERSION}")
set(CPACK_PACKAGE_VERSION_MAJOR "${VERSION_MAJOR}")
set(CPACK_PACKAGE_VERSION_MINOR "${VERSION_MINOR}")
set(CPACK_PACKAGE_VERSION_PATCH "${VERSION_PATCH}")
set(CPACK_PACKAGE_RELEASE "${RELEASE}")
set(CPACK_SOURCE_PACKAGE_FILE_NAME "${CMAKE_PROJECT_NAME}-${CPACK_PACKAGE_VERSION}-${CPACK_PACKAGE_RELEASE}")
# Semicolon-separated regex list of paths excluded from the source tarball.
set(CPACK_SOURCE_IGNORE_FILES
"${CMAKE_CURRENT_BINARY_DIR};/ApMon/;/git/;/gitlab-ci/;/ccache/;/xrootd-dsi/;/nginx/;/dsi/;\
;/grpc/eos-grpc.spec;/.deps/;~$;'.'o$;/lib/;/.git/;eos.spec.in;elrepopackage.spec;.tar.gz$;\
.tar.bz2$;${CPACK_SOURCE_IGNORE_FILES};")
set(EOS_TUI_VERSION "0.2.3")
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/config_spec.cmake.in"
"${CMAKE_CURRENT_BINARY_DIR}/cmake/config_spec.cmake" @ONLY IMMEDIATE)
# eos.spec is generated from eos.spec.in on demand and removed again after
# the 'dist' target has packaged it (see the POST_BUILD command below).
add_custom_command(
OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/eos.spec"
COMMAND ${CMAKE_COMMAND} -P "${CMAKE_CURRENT_BINARY_DIR}/cmake/config_spec.cmake"
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/cmake/config_spec.cmake"
"${CMAKE_CURRENT_SOURCE_DIR}/eos.spec.in")
add_custom_target(
dist
COMMAND ${CMAKE_MAKE_PROGRAM} package_source
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/eos.spec")
add_custom_command(
TARGET dist POST_BUILD
COMMAND rm ARGS -rf "${CMAKE_CURRENT_SOURCE_DIR}/eos.spec"
COMMENT "Clean generated spec file")
include(CPack)
#-------------------------------------------------------------------------------
# Source and binary rpms
#-------------------------------------------------------------------------------
set(EOS_ARCHIVE "${CMAKE_PROJECT_NAME}-${CPACK_PACKAGE_VERSION}-${CPACK_PACKAGE_RELEASE}.tar.gz")
set(SRPM_DEFINE --define "_source_filedigest_algorithm md5" --define "_binary_filedigest_algorithm md5")
# Translate the cmake configuration into the matching rpmbuild --with flags.
if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CLANG_BUILD)
LIST(APPEND RPM_OPTIONS --with clang)
endif()
if (NOT CLIENT)
LIST(APPEND RPM_OPTIONS --with server)
if (EOS_XROOTD)
LIST(APPEND RPM_OPTIONS --with eos_xrootd_rh)
endif()
endif()
if (ASAN)
LIST(APPEND RPM_OPTIONS --with asan)
endif()
if (TSAN)
LIST(APPEND RPM_OPTIONS --with tsan)
endif()
option(NO_SSE "Build without sse instruction set" OFF)
if (NO_SSE)
LIST(APPEND RPM_OPTIONS --with no_sse)
endif()
option(EOS_GRPC_GW "Build without eos grpc support" OFF)
if (EOS_GRPC_GW)
LIST(APPEND RPM_OPTIONS --with eos_grpc_gateway)
endif()
add_custom_target(
srpm
COMMAND rpmbuild -ts ${EOS_ARCHIVE} --define "_topdir ${CMAKE_BINARY_DIR}" ${SRPM_DEFINE} ${RPM_OPTIONS})
add_custom_target(
rpm
COMMAND rpmbuild -tb ${EOS_ARCHIVE} --define "_topdir ${CMAKE_BINARY_DIR}" ${RPM_OPTIONS})
add_dependencies(srpm dist)
add_dependencies(rpm dist)
include(EosTui)
#-------------------------------------------------------------------------------
# Custom target to build on OSX
#-------------------------------------------------------------------------------
add_custom_target(
osx
COMMAND sudo ../utils/eos-osx-package.sh ${CPACK_PACKAGE_VERSION})
#-------------------------------------------------------------------------------
# Custom target to build graphviz for all target
#-------------------------------------------------------------------------------
include(EosGraphviz)
#-------------------------------------------------------------------------------
# Print project summary
#-------------------------------------------------------------------------------
include(EosSummary)
================================================
FILE: CTestConfig.cmake
================================================
# CTest/CDash dashboard configuration: results are submitted to the public
# my.cdash.org instance under the "EOS" project.
set(CTEST_PROJECT_NAME "EOS")
set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC")
set(CTEST_DROP_SITE_CDASH TRUE)
set(CTEST_SUBMIT_URL https://my.cdash.org/submit.php?project=EOS)
================================================
FILE: License
================================================
// GNU GENERAL PUBLIC LICENSE
// Version 3, 29 June 2007
//
// Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
// Everyone is permitted to copy and distribute verbatim copies
// of this license document, but changing it is not allowed.
//
// Preamble
//
// The GNU General Public License is a free, copyleft license for
// software and other kinds of works.
//
// The licenses for most software and other practical works are designed
// to take away your freedom to share and change the works. By contrast,
// the GNU General Public License is intended to guarantee your freedom to
// share and change all versions of a program--to make sure it remains free
// software for all its users. We, the Free Software Foundation, use the
// GNU General Public License for most of our software; it applies also to
// any other work released this way by its authors. You can apply it to
// your programs, too.
//
// When we speak of free software, we are referring to freedom, not
// price. Our General Public Licenses are designed to make sure that you
// have the freedom to distribute copies of free software (and charge for
// them if you wish), that you receive source code or can get it if you
// want it, that you can change the software or use pieces of it in new
// free programs, and that you know you can do these things.
//
// To protect your rights, we need to prevent others from denying you
// these rights or asking you to surrender the rights. Therefore, you have
// certain responsibilities if you distribute copies of the software, or if
// you modify it: responsibilities to respect the freedom of others.
//
// For example, if you distribute copies of such a program, whether
// gratis or for a fee, you must pass on to the recipients the same
// freedoms that you received. You must make sure that they, too, receive
// or can get the source code. And you must show them these terms so they
// know their rights.
//
// Developers that use the GNU GPL protect your rights with two steps:
// (1) assert copyright on the software, and (2) offer you this License
// giving you legal permission to copy, distribute and/or modify it.
//
// For the developers' and authors' protection, the GPL clearly explains
// that there is no warranty for this free software. For both users' and
// authors' sake, the GPL requires that modified versions be marked as
// changed, so that their problems will not be attributed erroneously to
// authors of previous versions.
//
// Some devices are designed to deny users access to install or run
// modified versions of the software inside them, although the manufacturer
// can do so. This is fundamentally incompatible with the aim of
// protecting users' freedom to change the software. The systematic
// pattern of such abuse occurs in the area of products for individuals to
// use, which is precisely where it is most unacceptable. Therefore, we
// have designed this version of the GPL to prohibit the practice for those
// products. If such problems arise substantially in other domains, we
// stand ready to extend this provision to those domains in future versions
// of the GPL, as needed to protect the freedom of users.
//
// Finally, every program is threatened constantly by software patents.
// States should not allow patents to restrict development and use of
// software on general-purpose computers, but in those that do, we wish to
// avoid the special danger that patents applied to a free program could
// make it effectively proprietary. To prevent this, the GPL assures that
// patents cannot be used to render the program non-free.
//
// The precise terms and conditions for copying, distribution and
// modification follow.
//
// TERMS AND CONDITIONS
//
// 0. Definitions.
//
// "This License" refers to version 3 of the GNU General Public License.
//
// "Copyright" also means copyright-like laws that apply to other kinds of
// works, such as semiconductor masks.
//
// "The Program" refers to any copyrightable work licensed under this
// License. Each licensee is addressed as "you". "Licensees" and
// "recipients" may be individuals or organizations.
//
// To "modify" a work means to copy from or adapt all or part of the work
// in a fashion requiring copyright permission, other than the making of an
// exact copy. The resulting work is called a "modified version" of the
// earlier work or a work "based on" the earlier work.
//
// A "covered work" means either the unmodified Program or a work based
// on the Program.
//
// To "propagate" a work means to do anything with it that, without
// permission, would make you directly or secondarily liable for
// infringement under applicable copyright law, except executing it on a
// computer or modifying a private copy. Propagation includes copying,
// distribution (with or without modification), making available to the
// public, and in some countries other activities as well.
//
// To "convey" a work means any kind of propagation that enables other
// parties to make or receive copies. Mere interaction with a user through
// a computer network, with no transfer of a copy, is not conveying.
//
// An interactive user interface displays "Appropriate Legal Notices"
// to the extent that it includes a convenient and prominently visible
// feature that (1) displays an appropriate copyright notice, and (2)
// tells the user that there is no warranty for the work (except to the
// extent that warranties are provided), that licensees may convey the
// work under this License, and how to view a copy of this License. If
// the interface presents a list of user commands or options, such as a
// menu, a prominent item in the list meets this criterion.
//
// 1. Source Code.
//
// The "source code" for a work means the preferred form of the work
// for making modifications to it. "Object code" means any non-source
// form of a work.
//
// A "Standard Interface" means an interface that either is an official
// standard defined by a recognized standards body, or, in the case of
// interfaces specified for a particular programming language, one that
// is widely used among developers working in that language.
//
// The "System Libraries" of an executable work include anything, other
// than the work as a whole, that (a) is included in the normal form of
// packaging a Major Component, but which is not part of that Major
// Component, and (b) serves only to enable use of the work with that
// Major Component, or to implement a Standard Interface for which an
// implementation is available to the public in source code form. A
// "Major Component", in this context, means a major essential component
// (kernel, window system, and so on) of the specific operating system
// (if any) on which the executable work runs, or a compiler used to
// produce the work, or an object code interpreter used to run it.
//
// The "Corresponding Source" for a work in object code form means all
// the source code needed to generate, install, and (for an executable
// work) run the object code and to modify the work, including scripts to
// control those activities. However, it does not include the work's
// System Libraries, or general-purpose tools or generally available free
// programs which are used unmodified in performing those activities but
// which are not part of the work. For example, Corresponding Source
// includes interface definition files associated with source files for
// the work, and the source code for shared libraries and dynamically
// linked subprograms that the work is specifically designed to require,
// such as by intimate data communication or control flow between those
// subprograms and other parts of the work.
//
// The Corresponding Source need not include anything that users
// can regenerate automatically from other parts of the Corresponding
// Source.
//
// The Corresponding Source for a work in source code form is that
// same work.
//
// 2. Basic Permissions.
//
// All rights granted under this License are granted for the term of
// copyright on the Program, and are irrevocable provided the stated
// conditions are met. This License explicitly affirms your unlimited
// permission to run the unmodified Program. The output from running a
// covered work is covered by this License only if the output, given its
// content, constitutes a covered work. This License acknowledges your
// rights of fair use or other equivalent, as provided by copyright law.
//
// You may make, run and propagate covered works that you do not
// convey, without conditions so long as your license otherwise remains
// in force. You may convey covered works to others for the sole purpose
// of having them make modifications exclusively for you, or provide you
// with facilities for running those works, provided that you comply with
// the terms of this License in conveying all material for which you do
// not control copyright. Those thus making or running the covered works
// for you must do so exclusively on your behalf, under your direction
// and control, on terms that prohibit them from making any copies of
// your copyrighted material outside their relationship with you.
//
// Conveying under any other circumstances is permitted solely under
// the conditions stated below. Sublicensing is not allowed; section 10
// makes it unnecessary.
//
// 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
//
// No covered work shall be deemed part of an effective technological
// measure under any applicable law fulfilling obligations under article
// 11 of the WIPO copyright treaty adopted on 20 December 1996, or
// similar laws prohibiting or restricting circumvention of such
// measures.
//
// When you convey a covered work, you waive any legal power to forbid
// circumvention of technological measures to the extent such circumvention
// is effected by exercising rights under this License with respect to
// the covered work, and you disclaim any intention to limit operation or
// modification of the work as a means of enforcing, against the work's
// users, your or third parties' legal rights to forbid circumvention of
// technological measures.
//
// 4. Conveying Verbatim Copies.
//
// You may convey verbatim copies of the Program's source code as you
// receive it, in any medium, provided that you conspicuously and
// appropriately publish on each copy an appropriate copyright notice;
// keep intact all notices stating that this License and any
// non-permissive terms added in accord with section 7 apply to the code;
// keep intact all notices of the absence of any warranty; and give all
// recipients a copy of this License along with the Program.
//
// You may charge any price or no price for each copy that you convey,
// and you may offer support or warranty protection for a fee.
//
// 5. Conveying Modified Source Versions.
//
// You may convey a work based on the Program, or the modifications to
// produce it from the Program, in the form of source code under the
// terms of section 4, provided that you also meet all of these conditions:
//
// a) The work must carry prominent notices stating that you modified
// it, and giving a relevant date.
//
// b) The work must carry prominent notices stating that it is
// released under this License and any conditions added under section
// 7. This requirement modifies the requirement in section 4 to
// "keep intact all notices".
//
// c) You must license the entire work, as a whole, under this
// License to anyone who comes into possession of a copy. This
// License will therefore apply, along with any applicable section 7
// additional terms, to the whole of the work, and all its parts,
// regardless of how they are packaged. This License gives no
// permission to license the work in any other way, but it does not
// invalidate such permission if you have separately received it.
//
// d) If the work has interactive user interfaces, each must display
// Appropriate Legal Notices; however, if the Program has interactive
// interfaces that do not display Appropriate Legal Notices, your
// work need not make them do so.
//
// A compilation of a covered work with other separate and independent
// works, which are not by their nature extensions of the covered work,
// and which are not combined with it such as to form a larger program,
// in or on a volume of a storage or distribution medium, is called an
// "aggregate" if the compilation and its resulting copyright are not
// used to limit the access or legal rights of the compilation's users
// beyond what the individual works permit. Inclusion of a covered work
// in an aggregate does not cause this License to apply to the other
// parts of the aggregate.
//
// 6. Conveying Non-Source Forms.
//
// You may convey a covered work in object code form under the terms
// of sections 4 and 5, provided that you also convey the
// machine-readable Corresponding Source under the terms of this License,
// in one of these ways:
//
// a) Convey the object code in, or embodied in, a physical product
// (including a physical distribution medium), accompanied by the
// Corresponding Source fixed on a durable physical medium
// customarily used for software interchange.
//
// b) Convey the object code in, or embodied in, a physical product
// (including a physical distribution medium), accompanied by a
// written offer, valid for at least three years and valid for as
// long as you offer spare parts or customer support for that product
// model, to give anyone who possesses the object code either (1) a
// copy of the Corresponding Source for all the software in the
// product that is covered by this License, on a durable physical
// medium customarily used for software interchange, for a price no
// more than your reasonable cost of physically performing this
// conveying of source, or (2) access to copy the
// Corresponding Source from a network server at no charge.
//
// c) Convey individual copies of the object code with a copy of the
// written offer to provide the Corresponding Source. This
// alternative is allowed only occasionally and noncommercially, and
// only if you received the object code with such an offer, in accord
// with subsection 6b.
//
// d) Convey the object code by offering access from a designated
// place (gratis or for a charge), and offer equivalent access to the
// Corresponding Source in the same way through the same place at no
// further charge. You need not require recipients to copy the
// Corresponding Source along with the object code. If the place to
// copy the object code is a network server, the Corresponding Source
// may be on a different server (operated by you or a third party)
// that supports equivalent copying facilities, provided you maintain
// clear directions next to the object code saying where to find the
// Corresponding Source. Regardless of what server hosts the
// Corresponding Source, you remain obligated to ensure that it is
// available for as long as needed to satisfy these requirements.
//
// e) Convey the object code using peer-to-peer transmission, provided
// you inform other peers where the object code and Corresponding
// Source of the work are being offered to the general public at no
// charge under subsection 6d.
//
// A separable portion of the object code, whose source code is excluded
// from the Corresponding Source as a System Library, need not be
// included in conveying the object code work.
//
// A "User Product" is either (1) a "consumer product", which means any
// tangible personal property which is normally used for personal, family,
// or household purposes, or (2) anything designed or sold for incorporation
// into a dwelling. In determining whether a product is a consumer product,
// doubtful cases shall be resolved in favor of coverage. For a particular
// product received by a particular user, "normally used" refers to a
// typical or common use of that class of product, regardless of the status
// of the particular user or of the way in which the particular user
// actually uses, or expects or is expected to use, the product. A product
// is a consumer product regardless of whether the product has substantial
// commercial, industrial or non-consumer uses, unless such uses represent
// the only significant mode of use of the product.
//
// "Installation Information" for a User Product means any methods,
// procedures, authorization keys, or other information required to install
// and execute modified versions of a covered work in that User Product from
// a modified version of its Corresponding Source. The information must
// suffice to ensure that the continued functioning of the modified object
// code is in no case prevented or interfered with solely because
// modification has been made.
//
// If you convey an object code work under this section in, or with, or
// specifically for use in, a User Product, and the conveying occurs as
// part of a transaction in which the right of possession and use of the
// User Product is transferred to the recipient in perpetuity or for a
// fixed term (regardless of how the transaction is characterized), the
// Corresponding Source conveyed under this section must be accompanied
// by the Installation Information. But this requirement does not apply
// if neither you nor any third party retains the ability to install
// modified object code on the User Product (for example, the work has
// been installed in ROM).
//
// The requirement to provide Installation Information does not include a
// requirement to continue to provide support service, warranty, or updates
// for a work that has been modified or installed by the recipient, or for
// the User Product in which it has been modified or installed. Access to a
// network may be denied when the modification itself materially and
// adversely affects the operation of the network or violates the rules and
// protocols for communication across the network.
//
// Corresponding Source conveyed, and Installation Information provided,
// in accord with this section must be in a format that is publicly
// documented (and with an implementation available to the public in
// source code form), and must require no special password or key for
// unpacking, reading or copying.
//
// 7. Additional Terms.
//
// "Additional permissions" are terms that supplement the terms of this
// License by making exceptions from one or more of its conditions.
// Additional permissions that are applicable to the entire Program shall
// be treated as though they were included in this License, to the extent
// that they are valid under applicable law. If additional permissions
// apply only to part of the Program, that part may be used separately
// under those permissions, but the entire Program remains governed by
// this License without regard to the additional permissions.
//
// When you convey a copy of a covered work, you may at your option
// remove any additional permissions from that copy, or from any part of
// it. (Additional permissions may be written to require their own
// removal in certain cases when you modify the work.) You may place
// additional permissions on material, added by you to a covered work,
// for which you have or can give appropriate copyright permission.
//
// Notwithstanding any other provision of this License, for material you
// add to a covered work, you may (if authorized by the copyright holders of
// that material) supplement the terms of this License with terms:
//
// a) Disclaiming warranty or limiting liability differently from the
// terms of sections 15 and 16 of this License; or
//
// b) Requiring preservation of specified reasonable legal notices or
// author attributions in that material or in the Appropriate Legal
// Notices displayed by works containing it; or
//
// c) Prohibiting misrepresentation of the origin of that material, or
// requiring that modified versions of such material be marked in
// reasonable ways as different from the original version; or
//
// d) Limiting the use for publicity purposes of names of licensors or
// authors of the material; or
//
// e) Declining to grant rights under trademark law for use of some
// trade names, trademarks, or service marks; or
//
// f) Requiring indemnification of licensors and authors of that
// material by anyone who conveys the material (or modified versions of
// it) with contractual assumptions of liability to the recipient, for
// any liability that these contractual assumptions directly impose on
// those licensors and authors.
//
// All other non-permissive additional terms are considered "further
// restrictions" within the meaning of section 10. If the Program as you
// received it, or any part of it, contains a notice stating that it is
// governed by this License along with a term that is a further
// restriction, you may remove that term. If a license document contains
// a further restriction but permits relicensing or conveying under this
// License, you may add to a covered work material governed by the terms
// of that license document, provided that the further restriction does
// not survive such relicensing or conveying.
//
// If you add terms to a covered work in accord with this section, you
// must place, in the relevant source files, a statement of the
// additional terms that apply to those files, or a notice indicating
// where to find the applicable terms.
//
// Additional terms, permissive or non-permissive, may be stated in the
// form of a separately written license, or stated as exceptions;
// the above requirements apply either way.
//
// 8. Termination.
//
// You may not propagate or modify a covered work except as expressly
// provided under this License. Any attempt otherwise to propagate or
// modify it is void, and will automatically terminate your rights under
// this License (including any patent licenses granted under the third
// paragraph of section 11).
//
// However, if you cease all violation of this License, then your
// license from a particular copyright holder is reinstated (a)
// provisionally, unless and until the copyright holder explicitly and
// finally terminates your license, and (b) permanently, if the copyright
// holder fails to notify you of the violation by some reasonable means
// prior to 60 days after the cessation.
//
// Moreover, your license from a particular copyright holder is
// reinstated permanently if the copyright holder notifies you of the
// violation by some reasonable means, this is the first time you have
// received notice of violation of this License (for any work) from that
// copyright holder, and you cure the violation prior to 30 days after
// your receipt of the notice.
//
// Termination of your rights under this section does not terminate the
// licenses of parties who have received copies or rights from you under
// this License. If your rights have been terminated and not permanently
// reinstated, you do not qualify to receive new licenses for the same
// material under section 10.
//
// 9. Acceptance Not Required for Having Copies.
//
// You are not required to accept this License in order to receive or
// run a copy of the Program. Ancillary propagation of a covered work
// occurring solely as a consequence of using peer-to-peer transmission
// to receive a copy likewise does not require acceptance. However,
// nothing other than this License grants you permission to propagate or
// modify any covered work. These actions infringe copyright if you do
// not accept this License. Therefore, by modifying or propagating a
// covered work, you indicate your acceptance of this License to do so.
//
// 10. Automatic Licensing of Downstream Recipients.
//
// Each time you convey a covered work, the recipient automatically
// receives a license from the original licensors, to run, modify and
// propagate that work, subject to this License. You are not responsible
// for enforcing compliance by third parties with this License.
//
// An "entity transaction" is a transaction transferring control of an
// organization, or substantially all assets of one, or subdividing an
// organization, or merging organizations. If propagation of a covered
// work results from an entity transaction, each party to that
// transaction who receives a copy of the work also receives whatever
// licenses to the work the party's predecessor in interest had or could
// give under the previous paragraph, plus a right to possession of the
// Corresponding Source of the work from the predecessor in interest, if
// the predecessor has it or can get it with reasonable efforts.
//
// You may not impose any further restrictions on the exercise of the
// rights granted or affirmed under this License. For example, you may
// not impose a license fee, royalty, or other charge for exercise of
// rights granted under this License, and you may not initiate litigation
// (including a cross-claim or counterclaim in a lawsuit) alleging that
// any patent claim is infringed by making, using, selling, offering for
// sale, or importing the Program or any portion of it.
//
// 11. Patents.
//
// A "contributor" is a copyright holder who authorizes use under this
// License of the Program or a work on which the Program is based. The
// work thus licensed is called the contributor's "contributor version".
//
// A contributor's "essential patent claims" are all patent claims
// owned or controlled by the contributor, whether already acquired or
// hereafter acquired, that would be infringed by some manner, permitted
// by this License, of making, using, or selling its contributor version,
// but do not include claims that would be infringed only as a
// consequence of further modification of the contributor version. For
// purposes of this definition, "control" includes the right to grant
// patent sublicenses in a manner consistent with the requirements of
// this License.
//
// Each contributor grants you a non-exclusive, worldwide, royalty-free
// patent license under the contributor's essential patent claims, to
// make, use, sell, offer for sale, import and otherwise run, modify and
// propagate the contents of its contributor version.
//
// In the following three paragraphs, a "patent license" is any express
// agreement or commitment, however denominated, not to enforce a patent
// (such as an express permission to practice a patent or covenant not to
// sue for patent infringement). To "grant" such a patent license to a
// party means to make such an agreement or commitment not to enforce a
// patent against the party.
//
// If you convey a covered work, knowingly relying on a patent license,
// and the Corresponding Source of the work is not available for anyone
// to copy, free of charge and under the terms of this License, through a
// publicly available network server or other readily accessible means,
// then you must either (1) cause the Corresponding Source to be so
// available, or (2) arrange to deprive yourself of the benefit of the
// patent license for this particular work, or (3) arrange, in a manner
// consistent with the requirements of this License, to extend the patent
// license to downstream recipients. "Knowingly relying" means you have
// actual knowledge that, but for the patent license, your conveying the
// covered work in a country, or your recipient's use of the covered work
// in a country, would infringe one or more identifiable patents in that
// country that you have reason to believe are valid.
//
// If, pursuant to or in connection with a single transaction or
// arrangement, you convey, or propagate by procuring conveyance of, a
// covered work, and grant a patent license to some of the parties
// receiving the covered work authorizing them to use, propagate, modify
// or convey a specific copy of the covered work, then the patent license
// you grant is automatically extended to all recipients of the covered
// work and works based on it.
//
// A patent license is "discriminatory" if it does not include within
// the scope of its coverage, prohibits the exercise of, or is
// conditioned on the non-exercise of one or more of the rights that are
// specifically granted under this License. You may not convey a covered
// work if you are a party to an arrangement with a third party that is
// in the business of distributing software, under which you make payment
// to the third party based on the extent of your activity of conveying
// the work, and under which the third party grants, to any of the
// parties who would receive the covered work from you, a discriminatory
// patent license (a) in connection with copies of the covered work
// conveyed by you (or copies made from those copies), or (b) primarily
// for and in connection with specific products or compilations that
// contain the covered work, unless you entered into that arrangement,
// or that patent license was granted, prior to 28 March 2007.
//
// Nothing in this License shall be construed as excluding or limiting
// any implied license or other defenses to infringement that may
// otherwise be available to you under applicable patent law.
//
// 12. No Surrender of Others' Freedom.
//
// If conditions are imposed on you (whether by court order, agreement or
// otherwise) that contradict the conditions of this License, they do not
// excuse you from the conditions of this License. If you cannot convey a
// covered work so as to satisfy simultaneously your obligations under this
// License and any other pertinent obligations, then as a consequence you may
// not convey it at all. For example, if you agree to terms that obligate you
// to collect a royalty for further conveying from those to whom you convey
// the Program, the only way you could satisfy both those terms and this
// License would be to refrain entirely from conveying the Program.
//
// 13. Use with the GNU Affero General Public License.
//
// Notwithstanding any other provision of this License, you have
// permission to link or combine any covered work with a work licensed
// under version 3 of the GNU Affero General Public License into a single
// combined work, and to convey the resulting work. The terms of this
// License will continue to apply to the part which is the covered work,
// but the special requirements of the GNU Affero General Public License,
// section 13, concerning interaction through a network will apply to the
// combination as such.
//
// 14. Revised Versions of this License.
//
// The Free Software Foundation may publish revised and/or new versions of
// the GNU General Public License from time to time. Such new versions will
// be similar in spirit to the present version, but may differ in detail to
// address new problems or concerns.
//
// Each version is given a distinguishing version number. If the
// Program specifies that a certain numbered version of the GNU General
// Public License "or any later version" applies to it, you have the
// option of following the terms and conditions either of that numbered
// version or of any later version published by the Free Software
// Foundation. If the Program does not specify a version number of the
// GNU General Public License, you may choose any version ever published
// by the Free Software Foundation.
//
// If the Program specifies that a proxy can decide which future
// versions of the GNU General Public License can be used, that proxy's
// public statement of acceptance of a version permanently authorizes you
// to choose that version for the Program.
//
// Later license versions may give you additional or different
// permissions. However, no additional obligations are imposed on any
// author or copyright holder as a result of your choosing to follow a
// later version.
//
// 15. Disclaimer of Warranty.
//
// THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
// APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
// HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
// OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
// IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
// ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
//
// 16. Limitation of Liability.
//
// IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
// WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
// THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
// GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
// USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
// DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
// PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
// EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGES.
//
// 17. Interpretation of Sections 15 and 16.
//
// If the disclaimer of warranty and limitation of liability provided
// above cannot be given local legal effect according to their terms,
// reviewing courts shall apply local law that most closely approximates
// an absolute waiver of all civil liability in connection with the
// Program, unless a warranty or assumption of liability accompanies a
// copy of the Program in return for a fee.
//
// END OF TERMS AND CONDITIONS
const char* license=" /************************************************************************************\n \
* EOS - the CERN Disk Storage System *\n \
* Copyright (C) 2011 CERN/Switzerland *\n \
* *\n \
* This program is free software: you can redistribute it and/or modify *\n \
* it under the terms of the GNU General Public License as published by *\n \
* the Free Software Foundation, either version 3 of the License, or *\n \
* (at your option) any later version. *\n \
* *\n \
* This program is distributed in the hope that it will be useful, *\n \
* but WITHOUT ANY WARRANTY; without even the implied warranty of *\n \
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *\n \
* GNU General Public License for more details. *\n \
* *\n \
* EOS is based on the XRootD software: *\n \
* ----------------------------------------------------------------------------------*\n \
* Copyright (C) 2005-2010, Board of Trustees of the Leland Stanford, Jr. University.*\n \
* Produced under contract DE-AC02-76-SF00515 with the US Department of Energy. *\n \
* All rights reserved. *\n \
* See for more details. *\n \
* *\n \
* EOS uses crc32c checksum alogrithms from MIT/Intel: *\n \
* ----------------------------------------------------------------------------------*\n \
* Copyright 2008,2009,2010 Massachusetts Institute of Technology. *\n \
* Implementations adapted from Intel's Slicing By 8 Sourceforge Project *\n \
* http://sourceforge.net/projects/slicing-by-8/ *\n \
* Copyright (c) 2004-2006 Intel Corporation *\n \
************************************************************************************/\n \
\n \
GNU GENERAL PUBLIC LICENSE\n \
Version 3, 29 June 2007\n \
\n \
Copyright (C) 2007 Free Software Foundation, Inc. \n \
Everyone is permitted to copy and distribute verbatim copies\n \
of this license document, but changing it is not allowed.\n \
\n \
Preamble\n \
\n \
The GNU General Public License is a free, copyleft license for\n \
software and other kinds of works.\n \
\n \
The licenses for most software and other practical works are designed\n \
to take away your freedom to share and change the works. By contrast,\n \
the GNU General Public License is intended to guarantee your freedom to\n \
share and change all versions of a program--to make sure it remains free\n \
software for all its users. We, the Free Software Foundation, use the\n \
GNU General Public License for most of our software; it applies also to\n \
any other work released this way by its authors. You can apply it to\n \
your programs, too.\n \
\n \
When we speak of free software, we are referring to freedom, not\n \
price. Our General Public Licenses are designed to make sure that you\n \
have the freedom to distribute copies of free software (and charge for\n \
them if you wish), that you receive source code or can get it if you\n \
want it, that you can change the software or use pieces of it in new\n \
free programs, and that you know you can do these things.\n \
\n \
To protect your rights, we need to prevent others from denying you\n \
these rights or asking you to surrender the rights. Therefore, you have\n \
certain responsibilities if you distribute copies of the software, or if\n \
you modify it: responsibilities to respect the freedom of others.\n \
\n \
For example, if you distribute copies of such a program, whether\n \
gratis or for a fee, you must pass on to the recipients the same\n \
freedoms that you received. You must make sure that they, too, receive\n \
or can get the source code. And you must show them these terms so they\n \
know their rights.\n \
\n \
Developers that use the GNU GPL protect your rights with two steps:\n \
(1) assert copyright on the software, and (2) offer you this License\n \
giving you legal permission to copy, distribute and/or modify it.\n \
\n \
For the developers' and authors' protection, the GPL clearly explains\n \
that there is no warranty for this free software. For both users' and\n \
authors' sake, the GPL requires that modified versions be marked as\n \
changed, so that their problems will not be attributed erroneously to\n \
authors of previous versions.\n \
\n \
Some devices are designed to deny users access to install or run\n \
modified versions of the software inside them, although the manufacturer\n \
can do so. This is fundamentally incompatible with the aim of\n \
protecting users' freedom to change the software. The systematic\n \
pattern of such abuse occurs in the area of products for individuals to\n \
use, which is precisely where it is most unacceptable. Therefore, we\n \
have designed this version of the GPL to prohibit the practice for those\n \
products. If such problems arise substantially in other domains, we\n \
stand ready to extend this provision to those domains in future versions\n \
of the GPL, as needed to protect the freedom of users.\n \
\n \
Finally, every program is threatened constantly by software patents.\n \
States should not allow patents to restrict development and use of\n \
software on general-purpose computers, but in those that do, we wish to\n \
avoid the special danger that patents applied to a free program could\n \
make it effectively proprietary. To prevent this, the GPL assures that\n \
patents cannot be used to render the program non-free.\n \
\n \
The precise terms and conditions for copying, distribution and\n \
modification follow.\n \
\n \
TERMS AND CONDITIONS\n \
\n \
0. Definitions.\n \
\n \
\"This License\" refers to version 3 of the GNU General Public License.\n \
\n \
\"Copyright\" also means copyright-like laws that apply to other kinds of\n \
works, such as semiconductor masks.\n \
\n \
\"The Program\" refers to any copyrightable work licensed under this\n \
License. Each licensee is addressed as \"you\". \"Licensees\" and\n \
\"recipients\" may be individuals or organizations.\n \
\n \
To \"modify\" a work means to copy from or adapt all or part of the work\n \
in a fashion requiring copyright permission, other than the making of an\n \
exact copy. The resulting work is called a \"modified version\" of the\n \
earlier work or a work \"based on\" the earlier work.\n \
\n \
A \"covered work\" means either the unmodified Program or a work based\n \
on the Program.\n \
\n \
To \"propagate\" a work means to do anything with it that, without\n \
permission, would make you directly or secondarily liable for\n \
infringement under applicable copyright law, except executing it on a\n \
computer or modifying a private copy. Propagation includes copying,\n \
distribution (with or without modification), making available to the\n \
public, and in some countries other activities as well.\n \
\n \
To \"convey\" a work means any kind of propagation that enables other\n \
parties to make or receive copies. Mere interaction with a user through\n \
a computer network, with no transfer of a copy, is not conveying.\n \
\n \
An interactive user interface displays \"Appropriate Legal Notices\"\n \
to the extent that it includes a convenient and prominently visible\n \
feature that (1) displays an appropriate copyright notice, and (2)\n \
tells the user that there is no warranty for the work (except to the\n \
extent that warranties are provided), that licensees may convey the\n \
work under this License, and how to view a copy of this License. If\n \
the interface presents a list of user commands or options, such as a\n \
menu, a prominent item in the list meets this criterion.\n \
\n \
1. Source Code.\n \
\n \
The \"source code\" for a work means the preferred form of the work\n \
for making modifications to it. \"Object code\" means any non-source\n \
form of a work.\n \
\n \
A \"Standard Interface\" means an interface that either is an official\n \
standard defined by a recognized standards body, or, in the case of\n \
interfaces specified for a particular programming language, one that\n \
is widely used among developers working in that language.\n \
\n \
The \"System Libraries\" of an executable work include anything, other\n \
than the work as a whole, that (a) is included in the normal form of\n \
packaging a Major Component, but which is not part of that Major\n \
Component, and (b) serves only to enable use of the work with that\n \
Major Component, or to implement a Standard Interface for which an\n \
implementation is available to the public in source code form. A\n \
\"Major Component\", in this context, means a major essential component\n \
(kernel, window system, and so on) of the specific operating system\n \
(if any) on which the executable work runs, or a compiler used to\n \
produce the work, or an object code interpreter used to run it.\n \
\n \
The \"Corresponding Source\" for a work in object code form means all\n \
the source code needed to generate, install, and (for an executable\n \
work) run the object code and to modify the work, including scripts to\n \
control those activities. However, it does not include the work's\n \
System Libraries, or general-purpose tools or generally available free\n \
programs which are used unmodified in performing those activities but\n \
which are not part of the work. For example, Corresponding Source\n \
includes interface definition files associated with source files for\n \
the work, and the source code for shared libraries and dynamically\n \
linked subprograms that the work is specifically designed to require,\n \
such as by intimate data communication or control flow between those\n \
subprograms and other parts of the work.\n \
\n \
The Corresponding Source need not include anything that users\n \
can regenerate automatically from other parts of the Corresponding\n \
Source.\n \
\n \
The Corresponding Source for a work in source code form is that\n \
same work.\n \
\n \
2. Basic Permissions.\n \
\n \
All rights granted under this License are granted for the term of\n \
copyright on the Program, and are irrevocable provided the stated\n \
conditions are met. This License explicitly affirms your unlimited\n \
permission to run the unmodified Program. The output from running a\n \
covered work is covered by this License only if the output, given its\n \
content, constitutes a covered work. This License acknowledges your\n \
rights of fair use or other equivalent, as provided by copyright law.\n \
\n \
You may make, run and propagate covered works that you do not\n \
convey, without conditions so long as your license otherwise remains\n \
in force. You may convey covered works to others for the sole purpose\n \
of having them make modifications exclusively for you, or provide you\n \
with facilities for running those works, provided that you comply with\n \
the terms of this License in conveying all material for which you do\n \
not control copyright. Those thus making or running the covered works\n \
for you must do so exclusively on your behalf, under your direction\n \
and control, on terms that prohibit them from making any copies of\n \
your copyrighted material outside their relationship with you.\n \
\n \
Conveying under any other circumstances is permitted solely under\n \
the conditions stated below. Sublicensing is not allowed; section 10\n \
makes it unnecessary.\n \
\n \
3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n \
\n \
No covered work shall be deemed part of an effective technological\n \
measure under any applicable law fulfilling obligations under article\n \
11 of the WIPO copyright treaty adopted on 20 December 1996, or\n \
similar laws prohibiting or restricting circumvention of such\n \
measures.\n \
\n \
When you convey a covered work, you waive any legal power to forbid\n \
circumvention of technological measures to the extent such circumvention\n \
is effected by exercising rights under this License with respect to\n \
the covered work, and you disclaim any intention to limit operation or\n \
modification of the work as a means of enforcing, against the work's\n \
users, your or third parties' legal rights to forbid circumvention of\n \
technological measures.\n \
\n \
4. Conveying Verbatim Copies.\n \
\n \
You may convey verbatim copies of the Program's source code as you\n \
receive it, in any medium, provided that you conspicuously and\n \
appropriately publish on each copy an appropriate copyright notice;\n \
keep intact all notices stating that this License and any\n \
non-permissive terms added in accord with section 7 apply to the code;\n \
keep intact all notices of the absence of any warranty; and give all\n \
recipients a copy of this License along with the Program.\n \
\n \
You may charge any price or no price for each copy that you convey,\n \
and you may offer support or warranty protection for a fee.\n \
\n \
5. Conveying Modified Source Versions.\n \
\n \
You may convey a work based on the Program, or the modifications to\n \
produce it from the Program, in the form of source code under the\n \
terms of section 4, provided that you also meet all of these conditions:\n \
\n \
a) The work must carry prominent notices stating that you modified\n \
it, and giving a relevant date.\n \
\n \
b) The work must carry prominent notices stating that it is\n \
released under this License and any conditions added under section\n \
7. This requirement modifies the requirement in section 4 to\n \
\"keep intact all notices\".\n \
\n \
c) You must license the entire work, as a whole, under this\n \
License to anyone who comes into possession of a copy. This\n \
License will therefore apply, along with any applicable section 7\n \
additional terms, to the whole of the work, and all its parts,\n \
regardless of how they are packaged. This License gives no\n \
permission to license the work in any other way, but it does not\n \
invalidate such permission if you have separately received it.\n \
\n \
d) If the work has interactive user interfaces, each must display\n \
Appropriate Legal Notices; however, if the Program has interactive\n \
interfaces that do not display Appropriate Legal Notices, your\n \
work need not make them do so.\n \
\n \
A compilation of a covered work with other separate and independent\n \
works, which are not by their nature extensions of the covered work,\n \
and which are not combined with it such as to form a larger program,\n \
in or on a volume of a storage or distribution medium, is called an\n \
\"aggregate\" if the compilation and its resulting copyright are not\n \
used to limit the access or legal rights of the compilation's users\n \
beyond what the individual works permit. Inclusion of a covered work\n \
in an aggregate does not cause this License to apply to the other\n \
parts of the aggregate.\n \
\n \
6. Conveying Non-Source Forms.\n \
\n \
You may convey a covered work in object code form under the terms\n \
of sections 4 and 5, provided that you also convey the\n \
machine-readable Corresponding Source under the terms of this License,\n \
in one of these ways:\n \
\n \
a) Convey the object code in, or embodied in, a physical product\n \
(including a physical distribution medium), accompanied by the\n \
Corresponding Source fixed on a durable physical medium\n \
customarily used for software interchange.\n \
\n \
b) Convey the object code in, or embodied in, a physical product\n \
(including a physical distribution medium), accompanied by a\n \
written offer, valid for at least three years and valid for as\n \
long as you offer spare parts or customer support for that product\n \
model, to give anyone who possesses the object code either (1) a\n \
copy of the Corresponding Source for all the software in the\n \
product that is covered by this License, on a durable physical\n \
medium customarily used for software interchange, for a price no\n \
more than your reasonable cost of physically performing this\n \
conveying of source, or (2) access to copy the\n \
Corresponding Source from a network server at no charge.\n \
\n \
c) Convey individual copies of the object code with a copy of the\n \
written offer to provide the Corresponding Source. This\n \
alternative is allowed only occasionally and noncommercially, and\n \
only if you received the object code with such an offer, in accord\n \
with subsection 6b.\n \
\n \
d) Convey the object code by offering access from a designated\n \
place (gratis or for a charge), and offer equivalent access to the\n \
Corresponding Source in the same way through the same place at no\n \
further charge. You need not require recipients to copy the\n \
Corresponding Source along with the object code. If the place to\n \
copy the object code is a network server, the Corresponding Source\n \
may be on a different server (operated by you or a third party)\n \
that supports equivalent copying facilities, provided you maintain\n \
clear directions next to the object code saying where to find the\n \
Corresponding Source. Regardless of what server hosts the\n \
Corresponding Source, you remain obligated to ensure that it is\n \
available for as long as needed to satisfy these requirements.\n \
\n \
e) Convey the object code using peer-to-peer transmission, provided\n \
you inform other peers where the object code and Corresponding\n \
Source of the work are being offered to the general public at no\n \
charge under subsection 6d.\n \
\n \
A separable portion of the object code, whose source code is excluded\n \
from the Corresponding Source as a System Library, need not be\n \
included in conveying the object code work.\n \
\n \
A \"User Product\" is either (1) a \"consumer product\", which means any\n \
tangible personal property which is normally used for personal, family,\n \
or household purposes, or (2) anything designed or sold for incorporation\n \
into a dwelling. In determining whether a product is a consumer product,\n \
doubtful cases shall be resolved in favor of coverage. For a particular\n \
product received by a particular user, \"normally used\" refers to a\n \
typical or common use of that class of product, regardless of the status\n \
of the particular user or of the way in which the particular user\n \
actually uses, or expects or is expected to use, the product. A product\n \
is a consumer product regardless of whether the product has substantial\n \
commercial, industrial or non-consumer uses, unless such uses represent\n \
the only significant mode of use of the product.\n \
\n \
\"Installation Information\" for a User Product means any methods,\n \
procedures, authorization keys, or other information required to install\n \
and execute modified versions of a covered work in that User Product from\n \
a modified version of its Corresponding Source. The information must\n \
suffice to ensure that the continued functioning of the modified object\n \
code is in no case prevented or interfered with solely because\n \
modification has been made.\n \
\n \
If you convey an object code work under this section in, or with, or\n \
specifically for use in, a User Product, and the conveying occurs as\n \
part of a transaction in which the right of possession and use of the\n \
User Product is transferred to the recipient in perpetuity or for a\n \
fixed term (regardless of how the transaction is characterized), the\n \
Corresponding Source conveyed under this section must be accompanied\n \
by the Installation Information. But this requirement does not apply\n \
if neither you nor any third party retains the ability to install\n \
modified object code on the User Product (for example, the work has\n \
been installed in ROM).\n \
\n \
The requirement to provide Installation Information does not include a\n \
requirement to continue to provide support service, warranty, or updates\n \
for a work that has been modified or installed by the recipient, or for\n \
the User Product in which it has been modified or installed. Access to a\n \
network may be denied when the modification itself materially and\n \
adversely affects the operation of the network or violates the rules and\n \
protocols for communication across the network.\n \
\n \
Corresponding Source conveyed, and Installation Information provided,\n \
in accord with this section must be in a format that is publicly\n \
documented (and with an implementation available to the public in\n \
source code form), and must require no special password or key for\n \
unpacking, reading or copying.\n \
\n \
7. Additional Terms.\n \
\n \
\"Additional permissions\" are terms that supplement the terms of this\n \
License by making exceptions from one or more of its conditions.\n \
Additional permissions that are applicable to the entire Program shall\n \
be treated as though they were included in this License, to the extent\n \
that they are valid under applicable law. If additional permissions\n \
apply only to part of the Program, that part may be used separately\n \
under those permissions, but the entire Program remains governed by\n \
this License without regard to the additional permissions.\n \
\n \
When you convey a copy of a covered work, you may at your option\n \
remove any additional permissions from that copy, or from any part of\n \
it. (Additional permissions may be written to require their own\n \
removal in certain cases when you modify the work.) You may place\n \
additional permissions on material, added by you to a covered work,\n \
for which you have or can give appropriate copyright permission.\n \
\n \
Notwithstanding any other provision of this License, for material you\n \
add to a covered work, you may (if authorized by the copyright holders of\n \
that material) supplement the terms of this License with terms:\n \
\n \
a) Disclaiming warranty or limiting liability differently from the\n \
terms of sections 15 and 16 of this License; or\n \
\n \
b) Requiring preservation of specified reasonable legal notices or\n \
author attributions in that material or in the Appropriate Legal\n \
Notices displayed by works containing it; or\n \
\n \
c) Prohibiting misrepresentation of the origin of that material, or\n \
requiring that modified versions of such material be marked in\n \
reasonable ways as different from the original version; or\n \
\n \
d) Limiting the use for publicity purposes of names of licensors or\n \
authors of the material; or\n \
\n \
e) Declining to grant rights under trademark law for use of some\n \
trade names, trademarks, or service marks; or\n \
\n \
f) Requiring indemnification of licensors and authors of that\n \
material by anyone who conveys the material (or modified versions of\n \
it) with contractual assumptions of liability to the recipient, for\n \
any liability that these contractual assumptions directly impose on\n \
those licensors and authors.\n \
\n \
All other non-permissive additional terms are considered \"further\n \
restrictions\" within the meaning of section 10. If the Program as you\n \
received it, or any part of it, contains a notice stating that it is\n \
governed by this License along with a term that is a further\n \
restriction, you may remove that term. If a license document contains\n \
a further restriction but permits relicensing or conveying under this\n \
License, you may add to a covered work material governed by the terms\n \
of that license document, provided that the further restriction does\n \
not survive such relicensing or conveying.\n \
\n \
If you add terms to a covered work in accord with this section, you\n \
must place, in the relevant source files, a statement of the\n \
additional terms that apply to those files, or a notice indicating\n \
where to find the applicable terms.\n \
\n \
Additional terms, permissive or non-permissive, may be stated in the\n \
form of a separately written license, or stated as exceptions;\n \
the above requirements apply either way.\n \
\n \
8. Termination.\n \
\n \
You may not propagate or modify a covered work except as expressly\n \
provided under this License. Any attempt otherwise to propagate or\n \
modify it is void, and will automatically terminate your rights under\n \
this License (including any patent licenses granted under the third\n \
paragraph of section 11).\n \
\n \
However, if you cease all violation of this License, then your\n \
license from a particular copyright holder is reinstated (a)\n \
provisionally, unless and until the copyright holder explicitly and\n \
finally terminates your license, and (b) permanently, if the copyright\n \
holder fails to notify you of the violation by some reasonable means\n \
prior to 60 days after the cessation.\n \
\n \
Moreover, your license from a particular copyright holder is\n \
reinstated permanently if the copyright holder notifies you of the\n \
violation by some reasonable means, this is the first time you have\n \
received notice of violation of this License (for any work) from that\n \
copyright holder, and you cure the violation prior to 30 days after\n \
your receipt of the notice.\n \
\n \
Termination of your rights under this section does not terminate the\n \
licenses of parties who have received copies or rights from you under\n \
this License. If your rights have been terminated and not permanently\n \
reinstated, you do not qualify to receive new licenses for the same\n \
material under section 10.\n \
\n \
9. Acceptance Not Required for Having Copies.\n \
\n \
You are not required to accept this License in order to receive or\n \
run a copy of the Program. Ancillary propagation of a covered work\n \
occurring solely as a consequence of using peer-to-peer transmission\n \
to receive a copy likewise does not require acceptance. However,\n \
nothing other than this License grants you permission to propagate or\n \
modify any covered work. These actions infringe copyright if you do\n \
not accept this License. Therefore, by modifying or propagating a\n \
covered work, you indicate your acceptance of this License to do so.\n \
\n \
10. Automatic Licensing of Downstream Recipients.\n \
\n \
Each time you convey a covered work, the recipient automatically\n \
receives a license from the original licensors, to run, modify and\n \
propagate that work, subject to this License. You are not responsible\n \
for enforcing compliance by third parties with this License.\n \
\n \
An \"entity transaction\" is a transaction transferring control of an\n \
organization, or substantially all assets of one, or subdividing an\n \
organization, or merging organizations. If propagation of a covered\n \
work results from an entity transaction, each party to that\n \
transaction who receives a copy of the work also receives whatever\n \
licenses to the work the party's predecessor in interest had or could\n \
give under the previous paragraph, plus a right to possession of the\n \
Corresponding Source of the work from the predecessor in interest, if\n \
the predecessor has it or can get it with reasonable efforts.\n \
\n \
You may not impose any further restrictions on the exercise of the\n \
rights granted or affirmed under this License. For example, you may\n \
not impose a license fee, royalty, or other charge for exercise of\n \
rights granted under this License, and you may not initiate litigation\n \
(including a cross-claim or counterclaim in a lawsuit) alleging that\n \
any patent claim is infringed by making, using, selling, offering for\n \
sale, or importing the Program or any portion of it.\n \
\n \
11. Patents.\n \
\n \
A \"contributor\" is a copyright holder who authorizes use under this\n \
License of the Program or a work on which the Program is based. The\n \
work thus licensed is called the contributor's \"contributor version\".\n \
\n \
A contributor's \"essential patent claims\" are all patent claims\n \
owned or controlled by the contributor, whether already acquired or\n \
hereafter acquired, that would be infringed by some manner, permitted\n \
by this License, of making, using, or selling its contributor version,\n \
but do not include claims that would be infringed only as a\n \
consequence of further modification of the contributor version. For\n \
purposes of this definition, \"control\" includes the right to grant\n \
patent sublicenses in a manner consistent with the requirements of\n \
this License.\n \
\n \
Each contributor grants you a non-exclusive, worldwide, royalty-free\n \
patent license under the contributor's essential patent claims, to\n \
make, use, sell, offer for sale, import and otherwise run, modify and\n \
propagate the contents of its contributor version.\n \
\n \
In the following three paragraphs, a \"patent license\" is any express\n \
agreement or commitment, however denominated, not to enforce a patent\n \
(such as an express permission to practice a patent or covenant not to\n \
sue for patent infringement). To \"grant\" such a patent license to a\n \
party means to make such an agreement or commitment not to enforce a\n \
patent against the party.\n \
\n \
If you convey a covered work, knowingly relying on a patent license,\n \
and the Corresponding Source of the work is not available for anyone\n \
to copy, free of charge and under the terms of this License, through a\n \
publicly available network server or other readily accessible means,\n \
then you must either (1) cause the Corresponding Source to be so\n \
available, or (2) arrange to deprive yourself of the benefit of the\n \
patent license for this particular work, or (3) arrange, in a manner\n \
consistent with the requirements of this License, to extend the patent\n \
license to downstream recipients. \"Knowingly relying\" means you have\n \
actual knowledge that, but for the patent license, your conveying the\n \
covered work in a country, or your recipient's use of the covered work\n \
in a country, would infringe one or more identifiable patents in that\n \
country that you have reason to believe are valid.\n \
\n \
If, pursuant to or in connection with a single transaction or\n \
arrangement, you convey, or propagate by procuring conveyance of, a\n \
covered work, and grant a patent license to some of the parties\n \
receiving the covered work authorizing them to use, propagate, modify\n \
or convey a specific copy of the covered work, then the patent license\n \
you grant is automatically extended to all recipients of the covered\n \
work and works based on it.\n \
\n \
A patent license is \"discriminatory\" if it does not include within\n \
the scope of its coverage, prohibits the exercise of, or is\n \
conditioned on the non-exercise of one or more of the rights that are\n \
specifically granted under this License. You may not convey a covered\n \
work if you are a party to an arrangement with a third party that is\n \
in the business of distributing software, under which you make payment\n \
to the third party based on the extent of your activity of conveying\n \
the work, and under which the third party grants, to any of the\n \
parties who would receive the covered work from you, a discriminatory\n \
patent license (a) in connection with copies of the covered work\n \
conveyed by you (or copies made from those copies), or (b) primarily\n \
for and in connection with specific products or compilations that\n \
contain the covered work, unless you entered into that arrangement,\n \
or that patent license was granted, prior to 28 March 2007.\n \
\n \
Nothing in this License shall be construed as excluding or limiting\n \
any implied license or other defenses to infringement that may\n \
otherwise be available to you under applicable patent law.\n \
\n \
12. No Surrender of Others' Freedom.\n \
\n \
If conditions are imposed on you (whether by court order, agreement or\n \
otherwise) that contradict the conditions of this License, they do not\n \
excuse you from the conditions of this License. If you cannot convey a\n \
covered work so as to satisfy simultaneously your obligations under this\n \
License and any other pertinent obligations, then as a consequence you may\n \
not convey it at all. For example, if you agree to terms that obligate you\n \
to collect a royalty for further conveying from those to whom you convey\n \
the Program, the only way you could satisfy both those terms and this\n \
License would be to refrain entirely from conveying the Program.\n \
\n \
13. Use with the GNU Affero General Public License.\n \
\n \
Notwithstanding any other provision of this License, you have\n \
permission to link or combine any covered work with a work licensed\n \
under version 3 of the GNU Affero General Public License into a single\n \
combined work, and to convey the resulting work. The terms of this\n \
License will continue to apply to the part which is the covered work,\n \
but the special requirements of the GNU Affero General Public License,\n \
section 13, concerning interaction through a network will apply to the\n \
combination as such.\n \
\n \
14. Revised Versions of this License.\n \
\n \
The Free Software Foundation may publish revised and/or new versions of\n \
the GNU General Public License from time to time. Such new versions will\n \
be similar in spirit to the present version, but may differ in detail to\n \
address new problems or concerns.\n \
\n \
Each version is given a distinguishing version number. If the\n \
Program specifies that a certain numbered version of the GNU General\n \
Public License \"or any later version\" applies to it, you have the\n \
option of following the terms and conditions either of that numbered\n \
version or of any later version published by the Free Software\n \
Foundation. If the Program does not specify a version number of the\n \
GNU General Public License, you may choose any version ever published\n \
by the Free Software Foundation.\n \
\n \
If the Program specifies that a proxy can decide which future\n \
versions of the GNU General Public License can be used, that proxy's\n \
public statement of acceptance of a version permanently authorizes you\n \
to choose that version for the Program.\n \
\n \
Later license versions may give you additional or different\n \
permissions. However, no additional obligations are imposed on any\n \
author or copyright holder as a result of your choosing to follow a\n \
later version.\n \
\n \
15. Disclaimer of Warranty.\n \
\n \
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\n \
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\n \
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\n \
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\n \
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n \
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\n \
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\n \
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n \
\n \
16. Limitation of Liability.\n \
\n \
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\n \
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\n \
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\n \
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\n \
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\n \
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\n \
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\n \
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\n \
SUCH DAMAGES.\n \
\n \
17. Interpretation of Sections 15 and 16.\n \
\n \
If the disclaimer of warranty and limitation of liability provided\n \
above cannot be given local legal effect according to their terms,\n \
reviewing courts shall apply local law that most closely approximates\n \
an absolute waiver of all civil liability in connection with the\n \
Program, unless a warranty or assumption of liability accompanies a\n \
copy of the Program in return for a fee.\n \
\n \
END OF TERMS AND CONDITIONS\n \
\n";
================================================
FILE: README.md
================================================
[![pipeline status](https://gitlab.cern.ch/dss/eos/badges/master/pipeline.svg)](https://gitlab.cern.ch/dss/eos/commits/master)
# EOS
## Description
**EOS** is a software solution that aims to provide fast and reliable multi-PB
disk-only storage technology for both LHC and non-LHC use-cases at CERN. The
core of the implementation is the XRootD framework which provides feature-rich
remote access protocol. The storage system is running on commodity hardware
with disks in JBOD configuration. It is written mostly in C/C++, with some of
the extra modules in Python. Files can be accessed via native **XRootD**
protocol, a **POSIX-like FUSE** client or **HTTP(S) & WebDav** protocol.
## Documentation
The most up-to-date documentation can be found at:
[eos-docs.web.cern.ch/eos-docs](http://eos-docs.web.cern.ch/eos-docs/)
You will need to install Sphinx, Doxygen and the `solar_theme` (for Sphinx) in order to
generate the docs. For up-to-date information on getting Sphinx refer to the
[Sphinx docs](https://www.sphinx-doc.org/en/master/usage/installation.html).
```bash
## RHEL instructions
# Please choose the relevant python version based on the distro
sudo yum install python-sphinx doxygen
pip install solar_theme
## Ubuntu 20.04 instructions
sudo apt install python3-sphinx doxygen
pip3 install solar_theme
```
**Doxygen** documentation of the API is available in the `./doc` directory
and can be generated using the following command:
```bash
# Inside the EOS git clone directory
cd doc
doxygen
....
# Documentation generated in the ./html directory, viewable with any browser:
# file:///eos_git_clone_dir/doc/html/index.html
```
**Sphinx** documentation of installation and application is also in the `./doc` directory.
This is what is published at https://eos-docs.web.cern.ch.
Documentation can be generated using:
```bash
cd doc
make html
# Documentation can be found in build/html/index.html (view in a browser).
# The make interface supports other targets (e.g. latexpdf).
```
## Project directory structure
- `archive/`: Archive tool implementation in Python
- `auth_plugin/`: Authorization delegation plugin
- `authz/`: Authorization capability functionality
- `client/`: gRPC clients
- `cmake/`: CMake scripts and functions
- `common/`: Common helper files and classes
- `console/`: Command line client implementation
- `coverage/`: Test coverage config for LCOV
- `doc/`: Doxygen and Sphinx documentation
- `etc/`: Log rotation files
- `fst/`: The Storage Server Plugin (FST)
- `fusex/`: Next generation bi-directional FUSE mount client with high-end features
- `man/`: Manual pages
- `mgm/`: Metadata Namespace and Scheduling Redirector Plugin (MGM)
- `misc/`: systemd, sysconfig and service scripts
- `mq/`: Message Queue server plugin
- `namespace/`: Namespace implementation
- `nginx/`: Nginx patches for EOS integration
- `proto/`: Protobuf definitions for various components
- `test/`: Instance test scripts and dedicated test executables
- `unit_tests/`: Unit tests for individual modules
- `utils/`: Utilities and uninstall scripts
## Git submodules
Some components are maintained in separate upstream repositories and brought in as git submodules. Make sure submodules are initialized and kept up-to-date:
```bash
git submodule update --init --recursive
# To refresh later
git submodule update --recursive --remote
```
Submodules currently used:
- `quarkdb/`: QuarkDB client/server sources used by MGM for QuarkDB-backed services (e.g., QDB master, metadata/services that rely on QuarkDB).
- `common/xrootd-ssi-protobuf-interface/`: XRootD SSI + Protobuf interface headers used by EOS gRPC/SSI integrations and CTA-related workflows.
Tip: See `.gitmodules` for the authoritative list and remote URLs.
## Dependencies
Use the EOS Diopside dependency repository.
Follow the official installation instructions here:
[EOS Diopside Manual – Installation](https://eos-docs.web.cern.ch/diopside/manual/hardware-installation.html#installation).
```bash
yum install -y git gcc cmake cmake3 readline readline-devel fuse fuse-devel \
leveldb leveldb-devel binutils-devel zlib zlib-devel zlib-static \
bzip2 bzip2-devel libattr libattr-devel libuuid libuuid-devel \
xfsprogs xfsprogs-devel sparsehash-devel e2fsprogs e2fsprogs-devel \
openssl openssl-devel openssl-static eos-folly eos-rocksdb ncurses \
ncurses-devel ncurses-static protobuf3-devel openldap-devel \
hiredis-devel zeromq-devel jsoncpp-devel xrootd xrootd-server-devel \
xrootd-client-devel xrootd-private-devel cppzmq-devel libcurl-devel \
libevent-devel jemalloc jemalloc-devel
```
## Build
To build **EOS**, you need **gcc (>=7)** with **C++17 features** and **CMake**
installed on your system. If you can install ninja, **EOS** supports ninja for builds.
```bash
git submodule update --init --recursive
# Create build workdir
mkdir build-with-ninja
cd build-with-ninja
# Run CMake (pass -DCLIENT=1 if you only need the client binaries)
cmake3 -GNinja ..
# Build
ninja -j 4
```
Otherwise, standard Makefile builds are of course possible:
```bash
git submodule update --init --recursive
# Create build workdir
mkdir build
cd build
# Run CMake (pass -DCLIENT=1 if you only need the client binaries)
cmake3 ..
# Build
make -j 4
```
## Install/Uninstall
The default behaviour is to install **EOS** at system level using `CMAKE_INSTALL_PREFIX=/usr`.
To change the default install prefix path, do the following:
```bash
# Modify the default install path
cmake ../ -DCMAKE_INSTALL_PREFIX=/other_path
# if using ninja
ninja install
# Uninstall
ninja uninstall
# Install - might require sudo privileges
make install
# Uninstall
make uninstall
```
## Source/Binary RPM Generation
To build the source/binary RPMs run:
```bash
# Create source tarball
make dist
# Create Source RPM
make srpm
# Create RPM
make rpm
```
## Bug Reporting
You can send **EOS** bug reports to <project-eos@cern.ch>.
The preferred way, if you have access, is to use the online bug tracking
system [Jira][2] to submit new problem reports or search for existing ones:
https://its.cern.ch/jira/browse/EOS
## EOS Community
For discussions and help, there is also the eos community which brings together
users, developers & collaborators at https://eos-community.web.cern.ch/
## Licence
**EOS - The CERN Disk Storage System**
**Copyright (C) 2025 CERN/Switzerland**
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version. This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
[1]: http://eos-docs.web.cern.ch/eos-docs/quickstart/setup_repo.html#eos-base-setup-repos
[2]: https://its.cern.ch/jira/secure/Dashboard.jspa
================================================
FILE: archive/CMakeLists.txt
================================================
# ----------------------------------------------------------------------
# File: CMakeLists.txt
# Author: Elvin-Alin Sindrilaru - <esindril@cern.ch>
# ----------------------------------------------------------------------
# ************************************************************************
# * EOS - the CERN Disk Storage System *
# * Copyright (C) 2011 CERN/Switzerland *
# * *
# * This program is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation, either version 3 of the License, or *
# * (at your option) any later version. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program. If not, see <http://www.gnu.org/licenses/>.*
# ************************************************************************
# Install the EOS archive daemon and its helper tooling. Everything below
# is gated on a usable Python site-packages directory having been detected
# at configure time (PYTHONSITEPKG_FOUND / PYTHONSITEPKG_PATH).
if(PYTHONSITEPKG_FOUND)
# Entry-point scripts: installed into the binary directory, executable by
# owner, group and world.
install(PROGRAMS eosarchived.py eosarch_run.py eosarch_reconstruct.py
DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}
PERMISSIONS OWNER_READ OWNER_EXECUTE
GROUP_READ GROUP_EXECUTE
WORLD_READ WORLD_EXECUTE)
# The eosarch Python package goes into site-packages; unit tests and
# editor backup files ("*~") are excluded from the installed tree.
install(DIRECTORY eosarch
DESTINATION ${PYTHONSITEPKG_PATH}
PATTERN "tests" EXCLUDE
PATTERN "*~" EXCLUDE
PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ)
# .pth file extending the interpreter's sys.path (presumably pointing at
# the EOS-bundled XRootD bindings under /opt/eos — confirm against the
# file's contents).
install(FILES opt-eos-xrootd.pth
DESTINATION ${PYTHONSITEPKG_PATH}
PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ)
# Daemon configuration: world-readable, writable only by the owner.
install(FILES eosarchived.conf
DESTINATION ${CMAKE_INSTALL_SYSCONFDIR}
PERMISSIONS OWNER_READ OWNER_WRITE
GROUP_READ
WORLD_READ)
# Installing files depending on service manager (systemd)
# NOTE(review): this probes the *build* host for a systemd directory; when
# cross-compiling, the target system may differ — confirm this is intended.
set(SYSTEMD_DIR /usr/lib/systemd/)
if(EXISTS ${SYSTEMD_DIR})
# Environment file is renamed on install (eosarchived_env.sysconfig ->
# eosarchived_env) to match what the service unit expects.
install(FILES eosarchived_env.sysconfig
DESTINATION ${CMAKE_INSTALL_SYSCONFDIR}/sysconfig/
RENAME eosarchived_env)
install(FILES eosarchived.service
DESTINATION ${CMAKE_INSTALL_PREFIX}/lib/systemd/system/)
endif()
endif()
================================================
FILE: archive/eosarch/__init__.py
================================================
#!/usr/bin/python3
# ------------------------------------------------------------------------------
# File: __init__.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
"""This module provides access to EOS archives. It also facilitates the operations
that are done using such objects.
"""
from eosarch.configuration import Configuration
from eosarch.transfer import Transfer
from eosarch.processinfo import ProcessInfo
from eosarch.exceptions import NoErrorException, CheckEntryException
================================================
FILE: archive/eosarch/archivefile.py
================================================
#!/usr/bin/python3
# ------------------------------------------------------------------------------
# File: archivefile.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# ******************************************************************************
""" Class modelling an EOS archive file.
"""
from __future__ import unicode_literals
import logging
import json
from XRootD import client
from XRootD.client.flags import QueryCode
from eosarch.utils import is_atomic_version_file, seal_path
from eosarch.utils import exec_cmd, get_entry_info, set_dir_info
from eosarch.exceptions import CheckEntryException
class ArchiveFile(object):
    """ Class modelling an EOS archive file.

    Attributes:
        file: File object pointing to local archive file.
        d2t: True if operation from disk to tape, otherwise False. For backup
            operations we consider it as a transfer from tape to disk thus it
            is False.
        header: Archive header dictionary.
    """
    def __init__(self, path, d2t):
        """Initialize ArchiveFile object.

        Args:
            path (str): Local path to archive file.
            d2t (bool): True if transfer is to be disk to tape.

        Raises:
            IOError: Failed to open local transfer file.
        """
        self.logger = logging.getLogger("transfer")
        self.d2t = d2t
        try:
            self.file = open(path, 'r')
        except IOError as __:
            self.logger.error("Failed to open file={0}".format(path))
            raise
        line = self.file.readline()
        self.header = json.loads(line)
        self.fseek_dir = self.file.tell()  # save start position for dirs
        pos = self.fseek_dir
        # Default to the directory offset so that self.fseek_file is always
        # defined, even for archives which contain no file entries at all
        self.fseek_file = pos
        while line:
            line = self.file.readline()
            if not line:
                # Reached end of archive without finding any file entry
                break
            entry = json.loads(line)
            if entry[0] == 'f':
                self.fseek_file = pos  # save start position for files
                break
            pos = self.file.tell()
        # Create two XRootD.FileSystem object for source and destination
        # which are to be reused throughout the transfer.
        self.fs_src = client.FileSystem(self.header['src'])
        self.fs_dst = client.FileSystem(self.header['dst'])
        self.logger.debug("fseek_dir={0}, fseek_file={1}".format(self.fseek_dir,
                                                                 self.fseek_file))

    def __del__(self):
        """Destructor needs to close the file.
        """
        try:
            self.file.close()
        except AttributeError as __:
            # self.file was never set because open() failed in __init__
            pass
        except ValueError as __:
            self.logger.warning("File={0} already closed".format(self.file.name))

    def dirs(self):
        """Generator to read directory entries from the archive file.

        Returns:
            Return a directory entry from the archive file which looks like
            this: ['d', "./rel/path/dir", "val1", ,"val2" ... ]
        """
        self.file.seek(self.fseek_dir)
        line = self.file.readline()
        while line:
            dentry = json.loads(line)
            if dentry[0] == 'd':
                yield dentry
                line = self.file.readline()
            else:
                break

    def files(self):
        """Generator to read file entries from the archive file.

        Returns:
            Return a file entry from the archive file which looks like this:
            ['f', "./rel/path/file", "val1", ,"val2" ... ]
        """
        self.file.seek(self.fseek_file)
        line = self.file.readline()
        while line:
            fentry = json.loads(line)
            if fentry[0] == 'f':
                yield fentry
                line = self.file.readline()
            else:
                break

    def entries(self):
        """ Generator to read all entries from the archive file.

        Return:
            A list representing a file or directory entry. See above for the
            actual format.
        """
        for dentry in self.dirs():
            yield dentry
        for fentry in self.files():
            yield fentry

    def get_fs(self, url):
        """ Get XRootD.FileSystem object matching the host in the url.

        Args:
            url (string): XRootD endpoint URL.

        Returns:
            FileSystem object to be used or None.
        """
        if url.startswith(self.header['src']):
            return self.fs_src
        elif url.startswith(self.header['dst']):
            return self.fs_dst
        else:
            return None

    def get_endpoints(self, rel_path):
        """Get full source and destination URLs for the given relative path.

        For this use the information from the header. Take into account whether
        it is a disk to tape transfer or not. The src in header is always the
        disk and dst is the tape.

        Args:
            rel_path (str): Entry relative path.

        Returns:
            Return a tuple of string representing the source and the destination
            of the transfer.
        """
        if rel_path == "./":
            rel_path = ""
        src = self.header['src'] + rel_path
        dst = self.header['dst'] + rel_path
        if self.header['svc_class']:
            dst = ''.join([dst, "?svcClass=", self.header['svc_class']])
        return (src, dst) if self.d2t else (dst, src)

    def del_entry(self, rel_path, is_dir, tape_delete):
        """ Delete file/dir. For directories it is successful only if the dir
        is empty. For deleting the subtree rooted in a directory one needs to
        use the del_subtree method.

        Args:
            rel_path (str): Entry relative path as stored in the archive file.
            is_dir (bool): True is entry is dir, otherwise False.
            tape_delete(bool): If tape_delete is None the delete comes from a
                PUT or GET operations so we only use the value of self.d2t to
                decide which entry we will delete. If tape_delete is True we
                delete tape data, otherwise we purge (delete from disk only).

        Raises:
            IOError: Deletion could not be performed.
        """
        src, dst = self.get_endpoints(rel_path)
        if tape_delete is None:
            surl = dst  # self.d2t is already used inside get_endpoints
        else:
            surl = src if tape_delete else dst
        url = client.URL(surl)
        fs = self.get_fs(surl)
        self.logger.debug("Delete entry={0}".format(surl))
        if is_dir:
            st_rm, __ = fs.rmdir((url.path + "?eos.ruid=0&eos.rgid=0"))
        else:
            st_rm, __ = fs.rm((url.path + "?eos.ruid=0&eos.rgid=0"))
        if not st_rm.ok:
            # Check if entry exists - if it is already gone this is only a
            # warning, otherwise it is a hard error
            st_stat, __ = fs.stat(url.path)
            if st_stat.ok:
                err_msg = "Error removing entry={0}".format(surl)
                self.logger.error(err_msg)
                raise IOError()
            self.logger.warning("Entry={0} already removed".format(surl))

    def del_subtree(self, rel_path, tape_delete):
        """ Delete the subtree rooted at the provided path. Walk through all
        the files and delete them one by one then proceding with the directories
        from the deepest one to the root.

        Args:
            rel_path (string): Relative path to the subtree
            tape_delete (boolean or None): If present and true this is a
                deletion otherwise is a purge operation

        Raises:
            IOError: Deletion could not be performed
        """
        self.logger.debug("Del subtree for path={0}".format(rel_path))
        lst_dirs = []
        for fentry in self.files():
            path = fentry[1]
            # Delete only files rooted in current subtree
            if path.startswith(rel_path):
                self.del_entry(path, False, tape_delete)
        for dentry in self.dirs():
            path = dentry[1]
            if rel_path == "./" or path.startswith(rel_path):
                # Never delete the root path
                if path != "./":
                    lst_dirs.append(path)
        # Reverse the list so that we start deleting deepest (empty) dirs first
        lst_dirs.reverse()
        for path in lst_dirs:
            self.del_entry(path, True, tape_delete)

    def make_mutable(self):
        """ Make the EOS sub-tree pointed by header['src'] mutable.

        Raises:
            IOError when operation fails.
        """
        url = client.URL(self.header['src'])
        for dentry in self.dirs():
            dir_path = url.path + dentry[1]
            fgetattr = ''.join([url.protocol, "://", url.hostid, "//proc/user/",
                                "?mgm.cmd=attr&mgm.subcmd=get&mgm.attr.key=sys.acl",
                                "&mgm.path=", seal_path(dir_path)])
            (status, stdout, __) = exec_cmd(fgetattr)
            if not status:
                warn_msg = "No xattr sys.acl found for dir={0}".format(dir_path)
                self.logger.warning(warn_msg)
            else:
                # Remove the 'z:i' rule from the acl list
                stdout = stdout.replace('"', '')
                acl_val = stdout[stdout.find('=') + 1:]
                rules = acl_val.split(',')
                new_rules = []
                for rule in rules:
                    if rule.startswith("z:"):
                        tag, definition = rule.split(':')
                        pos = definition.find('i')
                        if pos != -1:
                            # Drop the immutable flag from the rule, keep the
                            # rest of the definition if anything remains
                            definition = definition[:pos] + definition[pos + 1:]
                            if definition:
                                new_rules.append(':'.join([tag, definition]))
                            continue
                    new_rules.append(rule)
                acl_val = ','.join(new_rules)
                self.logger.info("new acl: {0}".format(acl_val))
                if acl_val:
                    # Set the new sys.acl xattr
                    fmutable = ''.join([url.protocol, "://", url.hostid, "//proc/user/?",
                                        "mgm.cmd=attr&mgm.subcmd=set&mgm.attr.key=sys.acl",
                                        "&mgm.attr.value=", acl_val, "&mgm.path=", dir_path])
                    (status, __, stderr) = exec_cmd(fmutable)
                    if not status:
                        err_msg = "Error making dir={0} mutable, msg={1}".format(
                            dir_path, stderr)
                        self.logger.error(err_msg)
                        raise IOError(err_msg)
                else:
                    # sys.acl empty, remove it from the xattrs
                    frmattr = ''.join([url.protocol, "://", url.hostid, "//proc/user/?",
                                       "mgm.cmd=attr&mgm.subcmd=rm&mgm.attr.key=sys.acl",
                                       "&mgm.path=", dir_path])
                    (status, __, stderr) = exec_cmd(frmattr)
                    if not status:
                        err_msg = ("Error removing xattr=sys.acl for dir={0}, msg={1}"
                                   "").format(dir_path, stderr)
                        self.logger.error(err_msg)
                        raise IOError(err_msg)

    def check_root_dir(self):
        """ Do the necessary checks for the destination directory depending on
        the type of the transfer.

        Raises:
            IOError: Root dir state inconsistent.
        """
        root_str = self.header['dst' if self.d2t else 'src']
        fs = self.get_fs(root_str)
        url = client.URL(root_str)
        arg = url.path + "?eos.ruid=0&eos.rgid=0"
        st, __ = fs.stat(arg)
        if self.d2t:
            if st.ok:
                # For PUT destination dir must NOT exist
                err_msg = "Root PUT dir={0} exists".format(root_str)
                self.logger.error(err_msg)
                raise IOError(err_msg)
            else:
                # Make sure the rest of the path exists as for the moment CASTOR
                # mkdir -p /path/to/file does not work properly
                pos = url.path.find('/', 1)
                while pos != -1:
                    dpath = url.path[: pos]
                    pos = url.path.find('/', pos + 1)
                    st, __ = fs.stat(dpath)
                    if not st.ok:
                        st, __ = fs.mkdir(dpath)
                        if not st.ok:
                            err_msg = ("Dir={0} failed mkdir errmsg={1}"
                                       "").format(dpath, st.message)
                            self.logger.error(err_msg)
                            raise IOError(err_msg)
        elif not self.d2t:
            # For GET destination must exist and contain just the archive file
            if not st.ok:
                err_msg = "Root GET dir={0} does NOT exist".format(root_str)
                self.logger.error(err_msg)
                raise IOError(err_msg)
            else:
                ffindcount = ''.join([url.protocol, "://", url.hostid,
                                      "//proc/user/?mgm.cmd=find&mgm.path=",
                                      seal_path(url.path), "&mgm.option=Z"])
                (status, stdout, stderr) = exec_cmd(ffindcount)
                if status:
                    for entry in stdout.split():
                        tag, num = entry.split('=')
                        if ((tag == 'nfiles' and num not in ['1', '2']) or
                                (tag == 'ndirectories' and num != '1')):
                            err_msg = ("Root GET dir={0} should contain at least "
                                       "one file and at most two - clean up and "
                                       "try again").format(root_str)
                            self.logger.error(err_msg)
                            raise IOError(err_msg)
                else:
                    err_msg = ("Error doing find count on GET destination={0}"
                               ", msg={1}").format(root_str, stderr)
                    self.logger.error(err_msg)
                    raise IOError(err_msg)

    def verify(self, best_effort, tx_check_only=False):
        """ Check the integrity of the archive either on disk or on tape.

        Args:
            best_effort (boolean): If True then try to verify all entries even if
                we get an error during the check. This is used for the backup while
                for the archive, we return as soon as we find the first error.
            tx_check_only (boolean): If True then only check the existence of the
                entry, the size and checksum value. This is done only for archive
                GET operations.

        Returns:
            (status, lst_failed) - Status is True if archive is valid, otherwise
            false. In case the archive has errors return also the first corrupted
            entry from the archive file, otherwise return an empty list.
            For BACKUP operations return the status and the list of entries for
            which the verfication failed in order to provide a summary to the user.
        """
        self.logger.info("Do transfer verification")
        status = True
        lst_failed = []
        for entry in self.entries():
            try:
                self._verify_entry(entry, tx_check_only)
            except CheckEntryException as __:
                lst_failed.append(entry)
                status = False
                if best_effort:
                    continue
                else:
                    break
        return (status, lst_failed)

    def _verify_entry(self, entry, tx_check_only):
        """ Check that the entry (file/dir) has the proper meta data.

        Args:
            entry (list): Entry from the arhive file containing all info about
                this particular file/directory.
            tx_check_only (boolean): If True then for files only check their
                existence, size and checksum values.

        Raises:
            CheckEntryException: if entry verification fails.
        """
        self.logger.debug("Verify entry={0}".format(entry))
        is_dir, path = (entry[0] == 'd'), entry[1]
        __, dst = self.get_endpoints(path)
        url = client.URL(dst)
        if self.d2t:  # for PUT check entry size and checksum if possible
            fs = self.get_fs(dst)
            st, stat_info = fs.stat(url.path)
            if not st.ok:
                err_msg = "Entry={0} failed stat".format(dst)
                self.logger.error(err_msg)
                raise CheckEntryException("failed stat")
            if not is_dir:  # check file size match
                indx = self.header["file_meta"].index("size") + 2
                orig_size = int(entry[indx])
                if stat_info.size != orig_size:
                    err_msg = ("Verify entry={0}, expect_size={1}, size={2}"
                               "").format(dst, orig_size, stat_info.size)
                    self.logger.error(err_msg)
                    raise CheckEntryException("failed file size match")
                # Check checksum only if it is adler32 - only one supported by CASTOR
                indx = self.header["file_meta"].index("xstype") + 2
                # !!!HACK!!! Check the checksum only if file size is not 0 since
                # CASTOR does not store any checksum for 0 size files
                if stat_info.size != 0 and entry[indx] == "adler":
                    indx = self.header["file_meta"].index("xs") + 2
                    xs = entry[indx]
                    st, xs_resp = fs.query(QueryCode.CHECKSUM, url.path)
                    if not st.ok:
                        err_msg = "Entry={0} failed xs query".format(dst)
                        self.logger.error(err_msg)
                        raise CheckEntryException("failed xs query")
                    # In Python 3 the query response arrives as bytes - decode
                    # it so the comparisons against the str values below work,
                    # otherwise the checksum check is silently skipped
                    if isinstance(xs_resp, bytes):
                        xs_resp = xs_resp.decode("utf-8")
                    # Result has an annoying \x00 character at the end and it
                    # contains the xs type (adler32) and the xs value
                    resp = xs_resp.split('\x00')[0].split()
                    # If checksum value is not 8 char long then we need padding
                    if len(resp[1]) != 8:
                        resp[1] = "{0:0>8}".format(resp[1])
                    if resp[0] == "adler32" and resp[1] != xs:
                        err_msg = ("Entry={0} xs value missmatch xs_expected={1} "
                                   "xs_got={2}").format(dst, xs, resp[1])
                        self.logger.error(err_msg)
                        raise CheckEntryException("xs value missmatch")
        else:  # for GET check all metadata
            if is_dir:
                tags = self.header['dir_meta']
            else:
                tags = self.header['file_meta']
            try:
                if self.header['twindow_type'] and self.header['twindow_val']:
                    dfile = dict(zip(tags, entry[2:]))
                    twindow_sec = int(self.header['twindow_val'])
                    tentry_sec = int(float(dfile[self.header['twindow_type']]))
                    if tentry_sec < twindow_sec:
                        # No check for this entry
                        return
                # This is a backup so don't check atomic version files
                if is_atomic_version_file(entry[1]):
                    return
            except KeyError as __:
                # This is not a backup transfer but an archive one, carry on
                pass
            try:
                meta_info = get_entry_info(url, path, tags, is_dir)
            except (AttributeError, IOError, KeyError) as __:
                self.logger.error("Failed getting metainfo entry={0}".format(dst))
                raise CheckEntryException("failed getting metainfo")
            # Check if we have any excluded xattrs
            try:
                excl_xattr = self.header['excl_xattr']
            except KeyError as __:
                excl_xattr = list()
            if is_dir and excl_xattr:
                # For directories and configurations containing excluded xattrs
                # we refine the checks. If "*" in excl_xattr then no check is done.
                if "*" not in excl_xattr:
                    ref_dict = dict(zip(tags, entry[2:]))
                    new_dict = dict(zip(tags, meta_info[2:]))
                    for key, val in ref_dict.items():
                        if not isinstance(val, dict):
                            if new_dict[key] != val:
                                err_msg = ("Verify failed for entry={0} expect={1} got={2}"
                                           " at key={3}").format(dst, entry, meta_info, key)
                                self.logger.error(err_msg)
                                raise CheckEntryException("failed metainfo match")
                        else:
                            for kxattr, vxattr in val.items():
                                if kxattr not in excl_xattr:
                                    if vxattr != new_dict[key][kxattr]:
                                        err_msg = ("Verify failed for entry={0} expect={1} got={2}"
                                                   " at xattr key={3}").format(dst, entry, meta_info, kxattr)
                                        self.logger.error(err_msg)
                                        raise CheckEntryException("failed metainfo match")
            else:
                # For files with tx_check_only verification, we refine the checks
                if tx_check_only and not is_dir:
                    idx_size = self.header["file_meta"].index("size") + 2
                    idx_xstype = self.header["file_meta"].index("xstype") + 2
                    idx_xsval = self.header["file_meta"].index("xs") + 2
                    if (meta_info[idx_size] != entry[idx_size] or
                            meta_info[idx_xstype] != entry[idx_xstype] or
                            meta_info[idx_xsval] != entry[idx_xsval]):
                        err_msg = ("Partial verify failed for entry={0} expect={1} got={2}"
                                   "").format(dst, entry, meta_info)
                        self.logger.error(err_msg)
                        raise CheckEntryException("failed metainfo partial match")
                else:
                    if is_dir:
                        # Compensate for the removal fo the S_ISGID bit
                        mask_mode = int("02000", base=8)
                        val_mode = int(entry[4], base=8)
                        val_mode |= mask_mode
                        compat_entry = list(entry)
                        compat_entry[4] = "{0:o}".format(val_mode)
                    else:
                        compat_entry = list(entry)
                    if not meta_info == entry and not compat_entry == entry:
                        err_msg = ("Verify failed for entry={0} expect={1} got={2}"
                                   "").format(dst, entry, meta_info)
                        self.logger.error(err_msg)
                        raise CheckEntryException("failed metainfo match")
        self.logger.info("Entry={0}, status={1}".format(dst, True))

    def mkdir(self, dentry):
        """ Create directory and optionally for GET operations set the
        metadata information.

        Args:
            dentry (list): Directory entry as read from the archive file.

        Raises:
            IOError: Directory creation failed.
        """
        __, surl = self.get_endpoints(dentry[1])
        fs = self.get_fs(surl)
        url = client.URL(surl)
        # Create directory if not already existing
        st, __ = fs.stat((url.path + "?eos.ruid=0&eos.rgid=0"))
        if not st.ok:
            if not self.d2t:
                st, __ = fs.mkdir((url.path + "?eos.ruid=0&eos.rgid=0"))
            else:
                st, __ = fs.mkdir((url.path))
            if not st.ok:
                err_msg = ("Dir={0} failed mkdir errmsg={1}, errno={2}, code={3}"
                           "").format(surl, st.message, st.errno, st.code)
                self.logger.error(err_msg)
                raise IOError(err_msg)
        # For GET operations set also the metadata
        if not self.d2t:
            dict_dinfo = dict(zip(self.header['dir_meta'], dentry[2:]))
            # Get the list of excluded extended attributes if it exists
            try:
                excl_xattr = self.header['excl_xattr']
            except KeyError as __:
                excl_xattr = list()
            try:
                set_dir_info(surl, dict_dinfo, excl_xattr)
            except IOError as __:
                err_msg = "Dir={0} failed setting metadata".format(surl)
                self.logger.error(err_msg)
                raise IOError(err_msg)
================================================
FILE: archive/eosarch/asynchandler.py
================================================
#!/usr/bin/python3
# ------------------------------------------------------------------------------
# File: asynchandler.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# ******************************************************************************
""" Objects used for handling asynchronous XRootD requests.
"""
import logging
from threading import Condition
class _MkDirHandler(object):
""" Async mkdir handler which reports to MetaHandler.
Attributes:
path (string): Directory path for which the handler is created.
meta_handler (MetaHandler): Meta handler object.
"""
def __init__(self, path, meta_handler):
self.type = 'mkdir'
self.path = path
self.meta_handler = meta_handler
def __call__(self, status, response, hostlist):
self.meta_handler.handle(self.type, status, self.path)
class _PrepareHandler(object):
""" Async prepare handler which reports to MetaHandler.
Attributes:
path (string): Directory path for which the handler is created.
meta_handler (MetaHandler): Meta handler object.
"""
def __init__(self, path, meta_handler):
self.type = 'prepare'
self.path = path
self.meta_handler = meta_handler
def __call__(self, status, response, hostlist):
self.meta_handler.handle(self.type, status, self.path)
class _QueryHandler(object):
""" Async query handler which reports to MetaHandler.
Attributes:
path (string): File path for which the handler is created.
meta_handler (MetaHandler): Meta handler object.
"""
def __init__(self, path, meta_handler):
self.type = 'query'
self.path = path
self.meta_handler = meta_handler
def __call__(self, status, response, hostlist):
self.meta_handler.handle(self.type, status, self.path)
class MetaHandler(object):
    """ Meta handler aggregating the replies of different types of async
    requests (mkdir, prepare, query).

    Attributes:
        cond: Condition variable used for synchronization.
        logger: Logger object.
        num: Per-operation count of requests still waiting for a reply.
        status: Per-operation aggregated status - logical AND between the
            individual replies.
        err_msg: Per-operation last error message received.
        failed: Per-operation list of paths whose request failed.
        handlers: Map from operation type to per-request handler class.
    """
    def __init__(self):
        list_op = ['mkdir', 'prepare', 'query']
        self.num, self.status, self.err_msg, self.failed = {}, {}, {}, {}
        self.handlers = {'mkdir': _MkDirHandler,
                         'prepare': _PrepareHandler,
                         'query': _QueryHandler}
        for op in list_op:
            self.num[op] = 0
            self.status[op] = True
            self.err_msg[op] = ""
            self.failed[op] = []
        self.cond = Condition()
        self.logger = logging.getLogger("transfer")

    def register(self, op, path):
        """ Register a new pending request for the given operation type.

        Returns:
            A fresh per-request callback handler reporting back to self.
        """
        with self.cond:
            self.num[op] += 1
        return self.handlers[op](path, self)

    def handle(self, op, status, path):
        """Handle incoming response: fold the reply status into the
        aggregated one and wake up any waiter once all replies arrived.
        """
        with self.cond:
            self.status[op] = self.status[op] and status.ok
            self.num[op] -= 1
            if not status.ok:
                self.failed[op].append(path)
                self.err_msg[op] = status.message
            if self.num[op] == 0:
                # notify_all - the camelCase notifyAll alias is deprecated
                self.cond.notify_all()

    def wait(self, op):
        """Wait for all responses of the given operation type to arrive.

        Returns:
            The aggregated status of the operation (True if all succeeded).
        """
        with self.cond:
            while self.num[op] != 0:
                self.cond.wait()
            if self.failed[op]:
                self.logger.error(("List of failed {0} paths is: {1}, err_msg= {2}"
                                   "").format(op, self.failed[op], self.err_msg[op]))
            else:
                self.logger.debug("All {0} requests were successful".format(op))
            return self.status[op]
================================================
FILE: archive/eosarch/configuration.py
================================================
# ------------------------------------------------------------------------------
# File: configuration.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# ******************************************************************************
""" Class holding information about the configuration parameters used by both
the eosarchived daemon and also each individual transfer process.
"""
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import logging
import logging.handlers
class Configuration(object):
    """ Configuration class for the archiving daemon and the transfer processes.

    All configuration parameters are stored as upper-case attributes directly
    in the instance dictionary.
    """
    def __init__(self):
        """ Initialize the configuration by reading in all the parameters from
        the configuration file pointed to by the EOS_ARCHIVE_CONF environment
        variable (falling back to /etc/eosarchived.conf). First of all, get
        any environment variables and setup constants based on them.

        Raises:
            KeyError: LOG_DIR or EOS_ARCHIVE_DIR env. variable is missing.
            IOError: The configuration file could not be opened.
        """
        try:
            LOG_DIR = os.environ["LOG_DIR"]
        except KeyError as __:
            print("LOG_DIR env. not found", file=sys.stderr)
            raise
        try:
            self.__dict__['EOS_ARCHIVE_DIR'] = os.environ["EOS_ARCHIVE_DIR"]
        except KeyError as __:
            print("EOS_ARCHIVE_DIR env. not found", file=sys.stderr)
            raise
        try:
            archive_conf = os.environ["EOS_ARCHIVE_CONF"]
        except KeyError as __:
            print("EOS_ARCHIVE_CONF env. not found using /etc/eosarchived.conf", file=sys.stderr)
            archive_conf = "/etc/eosarchived.conf"
        # Map syslog-style level names to logging module constants
        log_dict = {"debug": logging.DEBUG,
                    "notice": logging.INFO,
                    "info": logging.INFO,
                    "warning": logging.WARNING,
                    "error": logging.ERROR,
                    "crit": logging.CRITICAL,
                    "alert": logging.CRITICAL}
        self.__dict__['FRONTEND_IPC'] = ''.join([self.__dict__['EOS_ARCHIVE_DIR'],
                                                 "archive_frontend.ipc"])
        self.__dict__['BACKEND_REQ_IPC'] = ''.join([self.__dict__['EOS_ARCHIVE_DIR'],
                                                    "archive_backend_req.ipc"])
        self.__dict__['BACKEND_PUB_IPC'] = ''.join([self.__dict__['EOS_ARCHIVE_DIR'],
                                                    "archive_backend_pub.ipc"])
        self.__dict__['LOG_FILE'] = LOG_DIR + "eosarchived.log"
        self.__dict__['CREATE_OP'] = 'create'
        self.__dict__['GET_OP'] = 'get'
        self.__dict__['PUT_OP'] = 'put'
        self.__dict__['TX_OP'] = 'transfers'
        self.__dict__['PURGE_OP'] = 'purge'
        self.__dict__['DELETE_OP'] = 'delete'
        self.__dict__['KILL_OP'] = 'kill'
        self.__dict__['BACKUP_OP'] = 'backup'
        self.__dict__["STATS"] = 'stats'
        self.__dict__['OPT_RETRY'] = 'retry'
        self.__dict__['OPT_FORCE'] = 'force'
        self.__dict__['ARCH_FN'] = ".archive"
        self.__dict__['ARCH_INIT'] = ".archive.init"
        self.__dict__['ARCHIVE_MAX_TIMEOUT'] = '86400'
        try:
            with open(archive_conf, 'r') as f:
                for line in f:
                    line = line.strip('\0\n ')
                    if len(line) and line[0] != '#':
                        tokens = line.split('=', 1)
                        # Try to convert to int by default
                        try:
                            self.__dict__[tokens[0]] = int(tokens[1])
                        except ValueError as __:
                            if tokens[0] == 'LOG_LEVEL':
                                self.__dict__[tokens[0]] = log_dict[tokens[1]]
                            else:
                                self.__dict__[tokens[0]] = tokens[1]
        except IOError as __:
            print("Unable to open config file: {0}".format(archive_conf), file=sys.stderr)
            raise
        # If no loglevel is set use INFO
        try:
            self.__dict__['LOG_LEVEL']
        except KeyError as __:
            self.__dict__['LOG_LEVEL'] = logging.INFO
        # Mapping between operation type and store path for transfer and log files
        self.__dict__['DIR'] = {}
        self.logger, self.handler = None, None

    def start_logging(self, logger_name, log_file, timed_rotating=False):
        """ Configure the logging.

        Args:
            logger_name (string): Name of the logger
            log_file (string): Path to the log file
            timed_rotating (boolean): If True use a TimedRotatingFileHandler
                rotating at midnight, otherwise a plain FileHandler
        """
        log_format = ('%(asctime)-15s %(name)s[%(process)d] %(filename)s:'
                      '%(lineno)d LVL=%(levelname)s %(message)s')
        logging.basicConfig(level=self.__dict__['LOG_LEVEL'], format=log_format)
        self.__dict__['LOGGER_NAME'] = logger_name
        self.__dict__['LOG_FILE'] = log_file
        self.logger = logging.getLogger(self.__dict__['LOGGER_NAME'])
        formatter = logging.Formatter(log_format)
        permissions = 0o644
        if timed_rotating:
            self.handler = logging.handlers.TimedRotatingFileHandler(
                self.__dict__['LOG_FILE'], 'midnight', encoding="utf-8")
        else:
            self.handler = logging.FileHandler(self.__dict__['LOG_FILE'],
                                               encoding="utf-8")
        try:
            os.chmod(self.__dict__['LOG_FILE'], permissions)
        except OSError:
            # If we don't have access to change the permissions, we need to
            # rely on the initial file creator having done the chmod
            pass
        self.handler.setFormatter(formatter)
        self.logger.addHandler(self.handler)
        self.logger.propagate = False

    def display(self):
        """ Print configuration either to the log file or stderr.
        """
        try:
            self.logger.info("Configuration parameters:")
            for key, val in self.__dict__.items():
                if key.isupper():
                    self.logger.info("conf.{0} = {1}".format(key, val))
        except AttributeError as __:
            # Logging not yet configured - fall back to stderr
            print("Configuration parameters:", file=sys.stderr)
            for key, val in self.__dict__.items():
                if key.isupper():
                    print("conf.{0} = {1}".format(key, val), file=sys.stderr)

    def __setattr__(self, name, value):
        """ Set object attribute.

        Args:
            name (string): Attribute name
            value (string): Attribute value
        """
        self.__dict__[name] = value
================================================
FILE: archive/eosarch/exceptions.py
================================================
# ------------------------------------------------------------------------------
# File: exceptions.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
""" Module containing user defined exceptions."""
class NoErrorException(Exception):
    """ Raised when a retry of an operation was requested but the initial
    check found no errors to correct.
    """
class CheckEntryException(Exception):
    """ Raised in case a verify entry operation fails.
    """
class NotOnTapeException(Exception):
    """ Raised when a file is still not on tape after the maximum configured
    timeout per entry.
    """
================================================
FILE: archive/eosarch/processinfo.py
================================================
# ------------------------------------------------------------------------------
# File: processinfo.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
""" Class emulating the process information for an archive/backup transfer
which is used by the eosarchived daemon to display the status of the
ongoing transfers.
"""
import time
import logging
from os import kill
from hashlib import sha256
class ProcessInfo(object):
    """ Class containing information about a process. It can also hold information
    about an orphan process and in this case the self.proc is None.

    Attributes:
        proc (Process): Process object (None for orphan processes)
        uid (int): UID of transfer owner
        gid (int): GID of transfer owner
        uuid (string): uuid of transfer
        pid (int): PID of process executing the transfer
        root_dir (string): Root directory in EOS of the archive/backup
        op (string): Operation type
        orig_req (JSON): JSON object representing the original request
    """
    def __init__(self, req_json = None):
        """ Initialize the process info object.

        Args:
            req_json (JSON): Json object containing the following information:
                cmd: type of operation
                src: EOS url to the archive/backup file
                uid: UID of the user triggering the archiving
                gid: GID of the user triggering the archiving

            If req_json is None this represents an orphan process whose
            attributes are filled in later via update().
        """
        self.logger = logging.getLogger("dispatcher")
        self.proc = None
        self.orig_req = req_json
        if req_json:
            # Normal, 'owned' process
            self.uid = int(req_json['uid'])
            self.gid = int(req_json['gid'])
            self.status = "pending"
            self.pid, self.op = 0, req_json['cmd']
            # Extract the archive/backup root directory path: skip past the
            # second "//" (end of the root://host part) and drop the file name
            src = req_json['src']
            pos = src.find("//", src.find("//") + 1) + 1
            self.root_dir = src[pos : src.rfind('/') + 1]
            # The transfer uuid is derived from the root directory path
            self.uuid = sha256(self.root_dir.encode()).hexdigest()
            self.timestamp = time.time()

    def update(self, dict_info):
        """ Update process information.

        Args:
            dict_info (dict): Dictionary containing the following information
            about an orphan process: uuid, pid, root_dir, op, status, uid, gid,
            timestamp. If this is not the orphan discovery step then we only
            have the status field.
        """
        # The status field is always present
        self.status = dict_info['status']
        try:
            # Update for orphan processes if information present
            self.uuid = dict_info['uuid']
            self.root_dir = dict_info['root_dir']
            self.op = dict_info['op']
            self.pid = int(dict_info['pid'])
            self.uid = int(dict_info['uid'])
            self.gid = int(dict_info['gid'])
            self.timestamp = float(dict_info['timestamp'])
        except KeyError:
            # This response is only a status update
            pass

    def is_alive(self):
        """ Check if the underlying process is alive. For processes started
        by the current dispatcher i.e. for which we hold a reference to the
        Process object we can use the poll() method, for orphan processes
        we use the OS functionality and send signal 0 to check if it is
        still running.

        Returns:
            True if process alive, False otherwise.
        """
        if self.proc:
            ret = self.proc.poll()
            if ret is not None:
                info_msg = ("Uuid={0}, pid={1}, op={2}, path={3} has terminated "
                            "returncode={4}").format(self.uuid, self.pid, self.op,
                                                     self.root_dir, ret)
                self.logger.info(info_msg)
                return False
        else:
            try:
                # Signal 0 performs error checking only - raises OSError if
                # the pid no longer exists
                kill(self.pid, 0)
            except OSError:
                dbg_msg = ("Uuid={0}, pid={1}, op={2}, path={3} has terminated - "
                           "no returncode available").format(self.uuid, self.pid,
                                                             self.op, self.root_dir)
                self.logger.debug(dbg_msg)
                return False
        return True
================================================
FILE: archive/eosarch/tests/__init__.py
================================================
# ------------------------------------------------------------------------------
# File: __init__.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
================================================
FILE: archive/eosarch/tests/env.py
================================================
# ------------------------------------------------------------------------------
# File: env.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
# XRootD URL of the EOS MGM instance the tests run against
SERVER_URL="root://localhost/"
# EOS directory used as source/destination for the test transfers
EOS_DIR="/eos/dev/test/"
# File name of the local archive fixture read by the tests (resolved
# relative to the current working directory - see test setUp)
LOCAL_FILE="test_file.dat"
================================================
FILE: archive/eosarch/tests/test_archivefile.py
================================================
# ------------------------------------------------------------------------------
# File: test_archivefile.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
import os
import unittest
import json
from archivefile.utils import exec_cmd
from archivefile.archivefile import ArchiveFile
from XRootD import client
from env import *
def test_exec_cmd():
    """Check the exec command.
    List directory extended attributes from EOS local instance.
    """
    base_url = client.URL(''.join([SERVER_URL, EOS_DIR]))
    attr_ls_cmd = ''.join([base_url.protocol, "://", base_url.hostid, "//proc/user/",
                           "?mgm.cmd=attr&mgm.subcmd=ls&mgm.path=", EOS_DIR])
    status, __, __ = exec_cmd(attr_ls_cmd)
    assert status
class TestArchiveFile(unittest.TestCase):
    """ Unit tests exercising the ArchiveFile interface."""

    def setUp(self):
        """ Build the ArchiveFile object under test from the local fixture."""
        self.local_path = os.getcwd() + '/' + LOCAL_FILE
        self.d2t = True
        self.arch = ArchiveFile(self.local_path, self.d2t)

    def tearDown(self):
        """ Nothing to clean up."""
        pass

    def test_list_dirs(self):
        """ Each entry yielded by dirs() must match, in order, the directory
        entries recorded in the archive file on disk.
        """
        with open(self.local_path, 'r') as fobj:
            fobj.readline()  # skip the header
            for gen_entry in self.arch.dirs():
                for raw_line in fobj:
                    disk_entry = json.loads(raw_line)
                    if disk_entry[0] == 'd':
                        self.assertEqual(disk_entry, gen_entry)
                        break

    def test_list_files(self):
        """ Each entry yielded by files() must match, in order, the file
        entries recorded in the archive file on disk.
        """
        with open(self.local_path, 'r') as fobj:
            fobj.readline()  # skip the header
            for gen_entry in self.arch.files():
                for raw_line in fobj:
                    disk_entry = json.loads(raw_line)
                    if disk_entry[0] == 'f':
                        self.assertEqual(disk_entry, gen_entry)
                        break

    def test_list_entries(self):
        """ The entries() generator must yield every record in file order."""
        with open(self.local_path, 'r') as fobj:
            fobj.readline()  # skip the header
            for gen_entry in self.arch.entries():
                for raw_line in fobj:
                    disk_entry = json.loads(raw_line)
                    self.assertEqual(disk_entry, gen_entry)
                    break

    def test_get_endpoints(self):
        """ Endpoints must match the header URLs and must be swapped when
        switching from disk-to-tape to tape-to-disk mode.
        """
        for gen_entry in self.arch.entries():
            src, dst = self.arch.get_endpoints(gen_entry[1])
            self.assertTrue(src.startswith(self.arch.header['src']))
            self.assertTrue(dst.startswith(self.arch.header['dst']))
        # Rebuild in tape-to-disk mode: src and dst must be reversed
        self.d2t = False
        self.arch = ArchiveFile(self.local_path, self.d2t)
        for gen_entry in self.arch.entries():
            src, dst = self.arch.get_endpoints(gen_entry[1])
            self.assertTrue(src.startswith(self.arch.header['dst']))
            self.assertTrue(dst.startswith(self.arch.header['src']))
================================================
FILE: archive/eosarch/transfer.py
================================================
# ------------------------------------------------------------------------------
# File: transfer.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
"""Module responsible for executing the transfers.
"""
from __future__ import unicode_literals
from __future__ import division
import os
import time
import logging
import threading
import zmq
import ast
from os.path import join
from time import sleep
from random import randrange
from hashlib import sha256
from XRootD import client
from XRootD.client.flags import PrepareFlags, QueryCode, OpenFlags, StatInfoFlags
from eosarch.archivefile import ArchiveFile
from eosarch.utils import exec_cmd, is_version_file
from eosarch.asynchandler import MetaHandler
from eosarch.exceptions import NoErrorException, NotOnTapeException
class ThreadJob(threading.Thread):
    """ Job executing a client.CopyProcess in a separate thread. This makes sense
    since a third-party copy job is mostly waiting for the completion of the
    job not doing any other operations and therefore not using the GIL too much.

    Attributes:
        xrd_status: Final XRootD status of the job (None until run)
        lst_jobs (list): List of jobs to be executed
        retries (int): Number of times this job was retried
    """
    def __init__(self, jobs, retry=0):
        """Constructor

        Args:
            jobs (list): List of (src, dst, size) transfers to be executed
            retry (int): Number of times this job was retried
        """
        threading.Thread.__init__(self)
        self.retries = retry
        self.xrd_status = None
        self.lst_jobs = list(jobs)

    def run(self):
        """ Prepare and execute all queued copy jobs, storing the final
        status in self.xrd_status.
        """
        self.retries += 1
        copy_proc = client.CopyProcess()
        for src_url, dst_url, fsize in self.lst_jobs:
            # 0-size files are copied directly, everything else is forced
            # through a third-party-copy transfer
            tpc_mode = "none" if int(fsize) == 0 else "only"
            # TODO: use the parallel mode starting with XRootD 4.1
            copy_proc.add_job(src_url, dst_url, force=True,
                              thirdparty=tpc_mode, tpctimeout=3600)
        self.xrd_status = copy_proc.prepare()
        if self.xrd_status.ok:
            self.xrd_status, __ = copy_proc.run()
class ThreadStatus(threading.Thread):
    """ Thread responsible for replying to any requests coming from the
    dispatcher process.
    """
    def __init__(self, transfer):
        """ Constructor

        Args:
            transfer (Transfer): Current transfer object
        """
        threading.Thread.__init__(self)
        # TODO: drop the logger as it may interfere with the main thread
        self.logger = logging.getLogger("transfer")
        self.transfer = transfer
        self.run_status = True
        self.lock = threading.Lock()

    def run(self):
        """ Reply to status requests published by the dispatcher until
        do_finish() is called. Requests arrive on a SUB socket filtered on
        the "[MASTER]" topic, replies go back on a DEALER socket.
        """
        self.logger.info("Starting the status thread")
        ctx = zmq.Context()
        socket_rr = ctx.socket(zmq.DEALER)
        socket_rr.connect("ipc://" + self.transfer.config.BACKEND_REQ_IPC)
        socket_ps = ctx.socket(zmq.SUB)
        mgr_filter = b"[MASTER]"
        addr = "ipc://" + self.transfer.config.BACKEND_PUB_IPC
        socket_ps.connect(addr)
        socket_ps.setsockopt(zmq.SUBSCRIBE, mgr_filter)

        while self.keep_running():
            # Poll with a timeout so the keep_running flag is re-checked
            # periodically even when no request arrives
            if socket_ps.poll(5000):
                try:
                    [__, msg] = socket_ps.recv_multipart()
                except zmq.ZMQError as err:
                    if err.errno == zmq.ETERM:
                        self.logger.error("ETERM error")
                        break  # shutting down, exit
                    else:
                        self.logger.exception(err)
                        continue
                except Exception as err:
                    self.logger.exception(err)
                    # Bug fix: without this continue the code below would
                    # reference the undefined variable msg (NameError)
                    continue

                self.logger.debug("RECV_MSG: {0}".format(msg))
                dict_cmd = ast.literal_eval(msg.decode())

                if dict_cmd['cmd'] == 'orphan_status':
                    # Full identification reply used when the dispatcher
                    # re-discovers a transfer it does not own
                    self.logger.info("Reconnect to master ... ")
                    resp = ("{{'uuid': '{0}', "
                            "'pid': '{1}', "
                            "'uid': '{2}',"
                            "'gid': '{3}',"
                            "'root_dir': '{4}', "
                            "'op': '{5}',"
                            "'status': '{6}', "
                            "'timestamp': '{7}'"
                            "}}").format(self.transfer.uuid,
                                         self.transfer.pid,
                                         self.transfer.uid,
                                         self.transfer.gid,
                                         self.transfer.root_dir,
                                         self.transfer.oper,
                                         self.transfer.get_status(),
                                         self.transfer.timestamp)
                elif dict_cmd['cmd'] == 'status':
                    resp = ("{{'uuid': '{0}', "
                            "'status': '{1}'"
                            "}}").format(self.transfer.uuid,
                                         self.transfer.get_status())
                else:
                    self.logger.error("Unknown command: {0}".format(dict_cmd))
                    continue

                self.logger.info("Sending response: {0}".format(resp))
                socket_rr.send_multipart([resp.encode()], zmq.NOBLOCK)

    def do_finish(self):
        """ Set the flag for the status thread to finish execution
        """
        with self.lock:
            self.run_status = False

    def keep_running(self):
        """ Check if we continue running - the transfer is ongoing

        Returns:
            True if status thread should keep running, otherwise False
        """
        with self.lock:
            return self.run_status
class Transfer(object):
""" Trasfer archive object
Attributes:
req_json (JSON): Command received from the EOS MGM. Needs to contains the
following entries: cmd, src, opt, uid, gid
threads (list): List of threads doing partial transfers(CopyProcess jobs)
"""
def __init__(self, req_json, config):
    """ Constructor

    Args:
        req_json (JSON): Command received from the EOS MGM containing the
            entries: cmd, src, opt, uid, gid
        config: Daemon configuration object
    """
    self.config = config
    self.oper = req_json['cmd']
    self.uid, self.gid = req_json['uid'], req_json['gid']
    self.do_retry = (req_json['opt'] == self.config.OPT_RETRY)
    self.force = (req_json['opt'] == self.config.OPT_FORCE)
    self.efile_full = req_json['src']
    # URL up to and including the last '/' i.e. the directory holding the
    # archive file. Bug fix: the previous negative-length slice
    # s[:-(len(s) - s.rfind('/') - 1)] degenerated to s[:-0] == "" when the
    # source URL ended with '/'; the direct slice below is equivalent for
    # normal inputs and correct for that edge case.
    self.efile_root = self.efile_full[:self.efile_full.rfind('/') + 1]
    # EOS absolute path of the archive root directory (strip protocol+host)
    self.root_dir = self.efile_root[self.efile_root.rfind('//') + 1:]
    self.uuid = sha256(self.root_dir.encode()).hexdigest()
    local_file = join(self.config.DIR[self.oper], self.uuid)
    self.tx_file = local_file + ".tx"
    self.list_jobs, self.threads = [], []
    self.pid = os.getpid()
    self.archive = None
    # Special case for initial PUT as we need to copy also the archive file
    self.init_put = self.efile_full.endswith(self.config.ARCH_INIT)
    self.status = "initializing"
    self.lock_status = threading.Lock()
    self.timestamp = time.time()
    self.logger = logging.getLogger("transfer")
    self.thread_status = ThreadStatus(self)
def get_status(self):
    """ Get current status

    Returns:
        String representing the status
    """
    with self.lock_status:
        return self.status
def set_status(self, msg):
    """ Set current status

    Args:
        msg (string): New status
    """
    with self.lock_status:
        self.status = msg
def run(self):
    """ Run the requested operation - first do the prepare step for the
    operation type, then execute it. Also starts the status-reply thread
    used by the dispatcher to poll progress.

    Raises:
        IOError
    """
    self.thread_status.start()

    if self.oper in [self.config.PUT_OP, self.config.GET_OP]:
        self.archive_prepare()

        if self.do_retry:
            self.do_retry_transfer()
        else:
            try:
                self.do_transfer()
            except NotOnTapeException as _:
                # NOTE(review): logging.Logger has no notice() method by
                # default - this relies on a custom log level registered
                # elsewhere; confirm, or switch to warning()
                self.logger.notice("Doing transfer re-try due to missing file on tape")
                self.do_retry_transfer()
    elif self.oper in [self.config.PURGE_OP, self.config.DELETE_OP]:
        self.archive_prepare()
        # DELETE removes data from tape, PURGE only from disk
        self.do_delete((self.oper == self.config.DELETE_OP))
    elif self.oper == self.config.BACKUP_OP:
        self.backup_prepare()
        self.do_backup()
def archive_prepare(self):
    """ Prepare requested archive operation: rename the archive file in EOS
    to its in-progress ".err" name and fetch a local working copy.

    Raises:
        IOError: Failed to rename or transfer archive file.
    """
    # Rename archive file in EOS
    efile_url = client.URL(self.efile_full)
    eosf_rename = ''.join([self.efile_root, self.config.ARCH_FN, ".", self.oper, ".err"])
    rename_url = client.URL(eosf_rename)
    rename_cmd = ''.join([rename_url.protocol, "://", rename_url.hostid,
                          "//proc/user/?mgm.cmd=file&mgm.subcmd=rename"
                          "&mgm.path=", efile_url.path,
                          "&mgm.file.source=", efile_url.path,
                          "&mgm.file.target=", rename_url.path])
    ok, __, stderr = exec_cmd(rename_cmd)

    if not ok:
        err_msg = ("Failed to rename archive file {0} to {1}, msg={2}"
                   "").format(self.efile_full, rename_url, stderr)
        self.logger.error(err_msg)
        raise IOError(err_msg)

    # Copy archive file from EOS to the local disk
    self.efile_full = eosf_rename
    eos_fs = client.FileSystem(self.efile_full)
    xrd_st, _ = eos_fs.copy(self.efile_full + "?eos.ruid=0&eos.rgid=0",
                            self.tx_file, True)

    if not xrd_st.ok:
        err_msg = ("Failed to copy archive file={0} to local disk at={1}"
                   "").format(self.efile_full, self.tx_file)
        self.logger.error(err_msg)
        raise IOError(err_msg)

    # Create the ArchiveFile object; disk-to-tape only for PUT operations
    self.archive = ArchiveFile(self.tx_file, self.oper == self.config.PUT_OP)
def do_delete(self, tape_delete):
    """ Delete archive either from disk (purge) or from tape (delete)

    Args:
        tape_delete (boolean): If true delete data from tape, otherwise
            from disk.

    Raises:
        IOError: Failed to delete an entry.
    """
    self.logger.info("Do delete with tape_delete={0}".format(tape_delete))
    # Delete also the archive file saved on tape
    if tape_delete:
        self.archive.del_entry(self.config.ARCH_INIT, False, tape_delete)

    # First remove all the files ...
    for fentry in self.archive.files():
        # d2t is false for both purge and deletion
        self.archive.del_entry(fentry[1], False, tape_delete)

    # ... then collect the directories, keeping the root dir when purging
    pending_dirs = []
    for dentry in self.archive.dirs():
        if not tape_delete and dentry[1] == './':
            continue
        pending_dirs.append(dentry[1])

    # Remove the directories from bottom up
    while pending_dirs:
        self.archive.del_entry(pending_dirs.pop(), True, tape_delete)

    # Remove immutable flag from the EOS sub-tree
    if tape_delete:
        self.archive.make_mutable()

    self.archive_tx_clean(True)
def do_transfer(self):
    """ Execute a put or get operation.

    Steps: create the directory hierarchy, issue Prepare2Get (GET only),
    copy the files, set ownership/permissions (GET only), verify the
    transferred entries, then clean up and rename the archive file to
    reflect the final status.

    Raises:
        IOError when an IO operation fails.
    """
    t0 = time.time()
    indx_dir = 0
    # Create directories
    for dentry in self.archive.dirs():
        # The archive root directory gets extra validity checks first
        if dentry[1] == "./":
            self.archive.check_root_dir()
        indx_dir += 1
        self.archive.mkdir(dentry)
        msg = "create dir {0}/{1}".format(indx_dir, self.archive.header['num_dirs'])
        self.set_status(msg)
    # For GET issue the Prepare2Get for all the files on tape
    self.prepare2get()
    # Copy files
    self.copy_files()
    # For GET set file ownership and permissions
    self.update_file_access()
    # Verify the transferred entries
    self.set_status("verifying")
    check_ok, __ = self.archive.verify(False)
    # For PUT operations wait that all the files are on tape and for GET
    # send a "prepare evict" request to CTA to clear the disk cache
    if self.archive.d2t:
        self.set_status("wait_on_tape")
        self.wait_on_tape()
    else:
        self.set_status("evict_disk_cache")
        try:
            self.evict_disk_cache()
        except OverflowError as __:
            # Older XRootD Python bindings reject the evict flag value
            self.logger.warning("The XRootD Python bindings do not support "
                                "the evict flag yet!")
    self.set_status("cleaning")
    self.logger.info("TIMING_transfer={0} sec".format(time.time() - t0))
    self.archive_tx_clean(check_ok)
def do_retry_transfer(self):
    """ Execute a put or get retry operation.

    Resumes a previously failed transfer: locates the first corrupted
    entry (the recovery checkpoint) and redoes the work from that entry
    onwards. For pure metadata corruption only the file access update is
    redone; for transfer corruption the copy itself is redone.

    Raises:
        IOError when an IO operation fails.
    """
    t0 = time.time()
    indx_dir = 0
    err_entry = None
    tx_ok, meta_ok = True, True
    found_checkpoint = False  # flag set when reaching recovery entry
    # Get the first corrupted entry and the type of corruption
    (tx_ok, meta_ok, lst_failed) = self.check_previous_tx()

    if not tx_ok or not meta_ok:
        err_entry = lst_failed[0]

    # Create directories, skipping everything before the checkpoint entry
    for dentry in self.archive.dirs():
        # Search for the recovery checkpoint
        if not found_checkpoint:
            if dentry != err_entry:
                indx_dir += 1
                continue
            else:
                found_checkpoint = True

        indx_dir += 1
        self.archive.mkdir(dentry)
        msg = "create dir {0}/{1}".format(indx_dir, self.archive.header['num_dirs'])
        self.set_status(msg)

    if not tx_ok:
        # For GET issue the Prepare2Get for all the files on tape
        self.prepare2get(err_entry, found_checkpoint)
        # Copy files
        self.copy_files(err_entry, found_checkpoint)
        # For GET set file ownership and permissions for all entries
        self.update_file_access(err_entry, found_checkpoint)
    else:
        # For GET metadata errors set file ownership and permissions only
        # for entries after the first corrupted one
        self.update_file_access()

    # Verify the transferred entries
    self.set_status("verifying")
    check_ok, __ = self.archive.verify(False)
    # For PUT operations wait that all the files are on tape
    if self.archive.d2t:
        self.set_status("wait_on_tape")
        self.wait_on_tape()
    else:
        self.set_status("evict_disk_cache")
        try:
            self.evict_disk_cache()
        except OverflowError as __:
            # Older XRootD Python bindings reject the evict flag value
            self.logger.warning("The XRootD Python bindings do not support "
                                "the evict flag yet!")
    self.set_status("cleaning")
    self.logger.info("TIMING_transfer={0} sec".format(time.time() - t0))
    self.archive_tx_clean(check_ok)
def tx_clean(self, check_ok):
    """ Clean a backup/archive transfer depending on its type.

    Args:
        check_ok (bool): Verification status of the transfer; ignored for
            backup operations.
    """
    if self.oper != self.config.BACKUP_OP:
        self.archive_tx_clean(check_ok)
    else:
        self.backup_tx_clean()
def backup_tx_clean(self):
    """ Clean after a backup transfer by copying the log file in the same
    directory as the destination of the backup.
    """
    # Push the local log file next to the backup destination in EOS
    eos_log = ''.join([self.efile_root, ".sys.b#.backup.log?eos.ruid=0&eos.rgid=0"])
    self.logger.debug("Copy log:{0} to {1}".format(self.config.LOG_FILE, eos_log))
    self.config.handler.flush()
    fs = client.FileSystem(self.efile_full)
    xrd_st, __ = fs.copy(self.config.LOG_FILE, eos_log, force=True)

    if xrd_st.ok:
        # Log file safely in EOS - the local copy can go away
        try:
            os.remove(self.config.LOG_FILE)
        except OSError as __:
            pass
    else:
        self.logger.error(("Failed to copy log file {0} to EOS at {1}"
                           "").format(self.config.LOG_FILE, eos_log))

    # Delete all local files associated with this transfer
    try:
        os.remove(self.tx_file)
    except OSError as __:
        pass

    # Join async status thread
    self.thread_status.do_finish()
    self.thread_status.join()
def archive_tx_clean(self, check_ok):
    """ Clean the transfer by renaming the archive file in EOS adding the
    following extensions:
    .done - the transfer was successful
    .err  - there were errors during the transfer. These are logged in the
            file .archive.log in the same directory.

    Also copies the local log file back into EOS (owned by the user who
    triggered the archive), removes local working files and joins the
    status thread.

    Args:
        check_ok (bool): True if no error occured during transfer,
            otherwise false.

    Raises:
        IOError: setting the ownership of the log file in EOS failed.
    """
    # Rename arch file in EOS to reflect the status
    if not check_ok:
        eosf_rename = ''.join([self.efile_root, self.config.ARCH_FN, ".", self.oper, ".err"])
    else:
        eosf_rename = ''.join([self.efile_root, self.config.ARCH_FN, ".", self.oper, ".done"])

    old_url = client.URL(self.efile_full)
    new_url = client.URL(eosf_rename)
    frename = ''.join([old_url.protocol, "://", old_url.hostid, "//proc/user/?",
                       "mgm.cmd=file&mgm.subcmd=rename&mgm.path=", old_url.path,
                       "&mgm.file.source=", old_url.path,
                       "&mgm.file.target=", new_url.path])
    (status, __, stderr) = exec_cmd(frename)

    if not status:
        err_msg = ("Failed to rename {0} to {1}, msg={2}"
                   "").format(self.efile_full, eosf_rename, stderr)
        self.logger.error(err_msg)
        # TODO: raise IOError
    else:
        # For successful delete operations remove also the archive file
        if self.oper == self.config.DELETE_OP and check_ok:
            fs = client.FileSystem(self.efile_full)
            st_rm, __ = fs.rm(new_url.path + "?eos.ruid=0&eos.rgid=0")

            if not st_rm.ok:
                warn_msg = "Failed to delete archive {0}".format(new_url.path)
                self.logger.warning(warn_msg)

    # Copy local log file back to EOS directory and set the ownership to the
    # identity of the client who triggered the archive
    dir_root = self.efile_root[self.efile_root.rfind('//') + 1:]
    eos_log = ''.join([old_url.protocol, "://", old_url.hostid, "/",
                       dir_root, self.config.ARCH_FN, ".log?eos.ruid=0&eos.rgid=0"])
    self.logger.debug("Copy log:{0} to {1}".format(self.config.LOG_FILE, eos_log))
    self.config.handler.flush()
    cp_client = client.FileSystem(self.efile_full)
    st, __ = cp_client.copy(self.config.LOG_FILE, eos_log, force=True)

    if not st.ok:
        self.logger.error(("Failed to copy log file {0} to EOS at {1}"
                           "").format(self.config.LOG_FILE, eos_log))
    else:
        # User triggering archive operation owns the log file
        eos_log_url = client.URL(eos_log)
        fs = client.FileSystem(eos_log)
        arg = ''.join([eos_log_url.path, "?eos.ruid=0&eos.rgid=0&mgm.pcmd=chown&uid=",
                       self.uid, "&gid=", self.gid])
        xrd_st, __ = fs.query(QueryCode.OPAQUEFILE, arg)

        if not xrd_st.ok:
            err_msg = ("Failed setting ownership of the log file in"
                       " EOS: {0}").format(eos_log)
            self.logger.error(err_msg)
            # NOTE(review): raising here skips the local-file cleanup and
            # the status-thread join below - confirm this is intended
            raise IOError(err_msg)
        else:
            # Delete log if successfully copied to EOS and changed ownership
            try:
                os.remove(self.config.LOG_FILE)
            except OSError as __:
                pass

    # Delete all local files associated with this transfer
    try:
        os.remove(self.tx_file)
    except OSError as __:
        pass

    # Join async status thread
    self.thread_status.do_finish()
    self.thread_status.join()
def copy_files(self, err_entry=None, found_checkpoint=False):
    """ Copy files.

    Note that when doing PUT the layout is not conserved. Therefore, a file
    with 3 replicas will end up as just a simple file in the new location.
    Jobs are accumulated in self.list_jobs and flushed in batches of
    config.BATCH_SIZE via flush_files().

    Args:
        err_entry (list): Entry record from the archive file corresponding
            to the first file/dir that was corrupted.
        found_checkpoint (boolean): If True it means the checkpoint was
            already found and we don't need to search for it.

    Raises:
        IOError: Copy request failed.
    """
    indx_file = 0
    # For inital PUT copy also the archive file to tape
    if self.init_put:
        # The archive init is already renamed to archive.put.err at this
        # point and we need to take this into consideration when
        # transferring it
        url = client.URL(self.efile_full)
        eos_fs = client.FileSystem(self.efile_full)
        st_stat, resp = eos_fs.stat(url.path)

        if st_stat.ok:
            __, dst = self.archive.get_endpoints(self.config.ARCH_INIT)
            self.list_jobs.append((self.efile_full + "?eos.ruid=0&eos.rgid=0" +
                                   "&eos.app=archive", dst, resp.size))
        else:
            err_msg = ''.join(["Failed to get init archive file info, msg=",
                               st_stat.message])
            self.logger.error(err_msg)
            raise IOError(err_msg)

    # Copy files
    for fentry in self.archive.files():
        # Search for the recovery checkpoint
        if self.do_retry and not found_checkpoint:
            if fentry != err_entry:
                indx_file += 1
                continue
            else:
                found_checkpoint = True

        indx_file += 1
        msg = "copy file {0}/{1}".format(indx_file, self.archive.header['num_files'])
        self.set_status(msg)
        src, dst = self.archive.get_endpoints(fentry[1])
        # Map the per-file metadata columns onto their header names
        dfile = dict(zip(self.archive.header['file_meta'], fentry[2:]))

        # Copy file
        if not self.archive.d2t:
            # For GET we also have the dictionary with the metadata
            dst = ''.join([dst, "?eos.ctime=", dfile['ctime'],
                           "&eos.mtime=", dfile['mtime'],
                           "&eos.bookingsize=", dfile['size'],
                           "&eos.targetsize=", dfile['size'],
                           "&eos.ruid=0&eos.rgid=0&eos.app=archive"])

            # If checksum 0 don't enforce it
            if dfile['xs'] != "0":
                dst = ''.join([dst, "&eos.checksum=", dfile['xs']])

            # For backup we try to read as root from the source
            if self.oper == self.config.BACKUP_OP:
                if '?' in src:
                    src = ''.join([src, "&eos.ruid=0&eos.rgid=0&eos.app=archive"])
                else:
                    src = ''.join([src, "?eos.ruid=0&eos.rgid=0&eos.app=archive"])

                # If this is a version file we save it as a 2-replica layout
                if is_version_file(fentry[1]):
                    dst = ''.join([dst, "&eos.layout.checksum=", dfile['xstype'],
                                   "&eos.layout.type=replica&eos.layout.nstripes=2"])

                # If time window specified then select only the matching entries
                # NOTE(review): nesting of the version-file and time-window
                # checks under the BACKUP_OP branch reconstructed from the
                # matching guard in update_file_access - confirm
                if (self.archive.header['twindow_type'] and
                        self.archive.header['twindow_val']):
                    twindow_sec = int(self.archive.header['twindow_val'])
                    tentry_sec = int(float(dfile[self.archive.header['twindow_type']]))

                    if tentry_sec < twindow_sec:
                        continue
        else:
            # For PUT read the files from EOS as root
            src = ''.join([src, "?eos.ruid=0&eos.rgid=0&eos.app=archive"])

        self.logger.info("Copying from {0} to {1}".format(src, dst))
        self.list_jobs.append((src, dst, dfile['size']))

        if len(self.list_jobs) >= self.config.BATCH_SIZE:
            st = self.flush_files(False)

            # For archives we fail immediately, for backups it's best-effort
            if not st and self.oper != self.config.BACKUP_OP:
                err_msg = "Failed to flush files"
                self.logger.error(err_msg)
                raise IOError(err_msg)

    # Flush all pending copies and set metadata info for GET operation
    st = self.flush_files(True)

    if not st and self.oper != self.config.BACKUP_OP:
        err_msg = "Failed to flush files"
        self.logger.error(err_msg)
        raise IOError(err_msg)
def flush_files(self, wait_all):
    """ Flush all pending transfers from the list of jobs.

    Maintains a pool of at most config.MAX_THREADS ThreadJob workers.
    Failed jobs are retried up to config.MAX_RETRIES times by spawning a
    replacement thread with the same job list.

    Args:
        wait_all (bool): If true wait and collect the status from all
            executing threads.

    Returns:
        True if files flushed successfully, otherwise false.
    """
    status = True
    # Wait until a thread from the pool gets freed if we reached the maximum
    # allowed number of running threads
    while len(self.threads) >= self.config.MAX_THREADS:
        remove_indx, retry_threads = [], []

        for indx, thread in enumerate(self.threads):
            thread.join(self.config.JOIN_TIMEOUT)

            # If thread finished get the status and mark it for removal
            if not thread.is_alive():
                # If failed then attempt a retry
                if (not thread.xrd_status.ok and
                        thread.retries <= self.config.MAX_RETRIES):
                    self.logger.log(logging.INFO,
                                    ("Thread={0} failed, retries={1}").format
                                    (thread.ident, thread.retries))
                    rthread = ThreadJob(thread.lst_jobs, thread.retries)
                    rthread.start()
                    retry_threads.append(rthread)
                    remove_indx.append(indx)
                    self.logger.log(logging.INFO, ("New thread={0} doing a retry").format
                                    (rthread.ident))
                    continue

                status = status and thread.xrd_status.ok
                log_level = logging.INFO if thread.xrd_status.ok else logging.ERROR
                self.logger.log(log_level, ("Thread={0} status={1} msg={2}").format
                                (thread.ident, thread.xrd_status.ok,
                                 thread.xrd_status.message))
                remove_indx.append(indx)
                # One slot freed is enough - leave the scan and re-check
                # the pool size in the while condition
                break

        # Remove old/finished threads and add retry ones. For removal we
        # need to start with big indexes first.
        remove_indx.reverse()

        for indx in remove_indx:
            del self.threads[indx]

        self.threads.extend(retry_threads)
        del retry_threads[:]
        del remove_indx[:]

    # If we still have jobs and previous archive jobs were successful or this
    # is a backup operartion (best-effort even if we have failed transfers)
    if (self.list_jobs and ((self.oper != self.config.BACKUP_OP and status) or
                            (self.oper == self.config.BACKUP_OP))):
        thread = ThreadJob(self.list_jobs)
        thread.start()
        self.threads.append(thread)
        del self.list_jobs[:]

    # If a previous archive job failed or we need to wait for all jobs to
    # finish then join the threads and collect their status
    if (self.oper != self.config.BACKUP_OP and not status) or wait_all:
        remove_indx, retry_threads = [], []

        while self.threads:
            for indx, thread in enumerate(self.threads):
                thread.join()

                # If failed then attempt a retry
                if (not thread.xrd_status.ok and
                        thread.retries <= self.config.MAX_RETRIES):
                    self.logger.log(logging.INFO, ("Thread={0} failed, retries={1}").format
                                    (thread.ident, thread.retries))
                    rthread = ThreadJob(thread.lst_jobs, thread.retries)
                    rthread.start()
                    retry_threads.append(rthread)
                    remove_indx.append(indx)
                    self.logger.log(logging.INFO, ("New thread={0} doing a retry").format
                                    (rthread.ident))
                    continue

                status = status and thread.xrd_status.ok
                log_level = logging.INFO if thread.xrd_status.ok else logging.ERROR
                self.logger.log(log_level, ("Thread={0} status={1} msg={2}").format
                                (thread.ident, thread.xrd_status.ok,
                                 thread.xrd_status.message))
                remove_indx.append(indx)

            # Remove old/finished threads and add retry ones. For removal we
            # need to start with big indexes first.
            remove_indx.reverse()

            for indx in remove_indx:
                del self.threads[indx]

            self.threads.extend(retry_threads)
            del retry_threads[:]
            del remove_indx[:]

    return status
def update_file_access(self, err_entry=None, found_checkpoint=False):
    """ Set the ownership and the permissions for the files copied to EOS.
    This is done only for GET operation i.e. self.archive.d2t == False.

    Args:
        err_entry (list): Entry record from the archive file corresponding
            to the first file/dir that was corrupted.
        found_checkpoint (boolean): If True, it means the checkpoint was
            already found and we don't need to search for it i.e. the
            corrupted entry is a directory.

    Raises:
        IOError: chown or chmod operations failed
    """
    # Disk-to-tape (PUT) transfers don't touch EOS metadata - nothing to do
    if self.archive.d2t:
        return
    self.set_status("updating file access")
    t0 = time.time()
    oper = 'query'
    # Aggregates the async query callbacks; wait(oper) reports overall success
    metahandler = MetaHandler()
    fs = self.archive.fs_src
    for fentry in self.archive.files():
        # If backup operation and time window specified then update only matching ones
        if self.oper == self.config.BACKUP_OP:
            if self.archive.header['twindow_type'] and self.archive.header['twindow_val']:
                dfile = dict(zip(self.archive.header['file_meta'], fentry[2:]))
                twindow_sec = int(self.archive.header['twindow_val'])
                # Timestamps are "sec.nsec" strings, hence float() then int()
                tentry_sec = int(float(dfile[self.archive.header['twindow_type']]))
                if tentry_sec < twindow_sec:
                    continue
        # Search for the recovery checkpoint: skip entries until the first
        # corrupted one, then process everything from that point on
        if err_entry and not found_checkpoint:
            if fentry != err_entry:
                continue
            else:
                found_checkpoint = True
        # Second endpoint is used here - presumably the EOS-side URL for a
        # GET operation; confirm against ArchiveFile.get_endpoints
        __, surl = self.archive.get_endpoints(fentry[1])
        url = client.URL(surl)
        dict_meta = dict(zip(self.archive.header['file_meta'], fentry[2:]))
        # Send the chown async request
        arg = ''.join([url.path, "?eos.ruid=0&eos.rgid=0&mgm.pcmd=chown&uid=",
                       dict_meta['uid'], "&gid=", dict_meta['gid']])
        xrd_st = fs.query(QueryCode.OPAQUEFILE, arg,
                          callback=metahandler.register(oper, surl))
        if not xrd_st.ok:
            # Drain outstanding async responses before bailing out
            __ = metahandler.wait(oper)
            err_msg = "Failed query chown for path={0}".format(surl)
            self.logger.error(err_msg)
            raise IOError(err_msg)
        # Send the chmod async request
        mode = int(dict_meta['mode'], 8)  # mode is saved in octal format
        arg = ''.join([url.path, "?eos.ruid=0&eos.rgid=0&mgm.pcmd=chmod&mode=",
                       str(mode)])
        xrd_st = fs.query(QueryCode.OPAQUEFILE, arg,
                          callback=metahandler.register(oper, surl))
        if not xrd_st.ok:
            __ = metahandler.wait(oper)
            err_msg = "Failed query chmod for path={0}".format(surl)
            self.logger.error(err_msg)
            raise IOError(err_msg)
        # Send the utime async request to set the mtime (tv2) and ctime (tv1)
        mtime = dict_meta['mtime']
        mtime_sec, mtime_nsec = mtime.split('.', 1)
        ctime = dict_meta['ctime']
        ctime_sec, ctime_nsec = ctime.split('.', 1)
        arg = ''.join([url.path, "?eos.ruid=0&eos.rgid=0&mgm.pcmd=utimes",
                       "&tv1_sec=", ctime_sec, "&tv1_nsec=", ctime_nsec,
                       "&tv2_sec=", mtime_sec, "&tv2_nsec=", mtime_nsec])
        xrd_st = fs.query(QueryCode.OPAQUEFILE, arg,
                          callback=metahandler.register(oper, surl))
        if not xrd_st.ok:
            __ = metahandler.wait(oper)
            err_msg = "Failed query utimes for path={0}".format(surl)
            self.logger.error(err_msg)
            raise IOError(err_msg)
    # Collect all queued async responses; False if any request failed
    status = metahandler.wait(oper)
    if status:
        t1 = time.time()
        self.logger.info("TIMING_update_file_access={0} sec".format(t1 - t0))
    else:
        err_msg = "Failed update file access"
        self.logger.error(err_msg)
        raise IOError(err_msg)
def check_previous_tx(self):
    """ Find checkpoint for a previous run. Two kinds of verification are
    performed:
    - transfer check: the files exist and have the correct size/checksum
    - metadata check: all entries carry the expected metadata values

    Returns:
        (tx_ok, meta_ok, lst_failed): Tuple with the status of each check
        and the list of corrupted entries.

    Raises:
        NoErrorException: Everything from the previous run verified clean,
            so there is nothing left to retry.
    """
    self.set_status("verify last run")
    meta_ok = False
    # First pass checks existence, file size and checksum
    tx_ok, lst_failed = self.archive.verify(False, True)
    if tx_ok:
        # Second pass checks only the metadata
        meta_ok, lst_failed = self.archive.verify(False, False)
        if meta_ok:
            self.do_retry = False
            raise NoErrorException()
    else:
        # Real transfer error - drop the corrupted entry so it gets redone
        entry = lst_failed[0]
        self.logger.info("Delete corrupted entry={0}".format(entry))
        if entry[0] == 'd':
            self.archive.del_subtree(entry[1], None)
        else:
            self.archive.del_entry(entry[1], False, None)
    return (tx_ok, meta_ok, lst_failed)
def prepare2get(self, err_entry=None, found_checkpoint=False):
    """ This method is only executed for GET operations and its purpose is
    to issue the Prepare2Get commands for the files in the archive which
    will later on be copied back to EOS.

    Args:
        err_entry (list): Entry record from the archive file corresponding
            to the first file/dir that was corrupted.
        found_checkpoint (bool): If True it means the checkpoint was
            already found and we don't need to search for it.

    Raises:
        IOError: The Prepare2Get request failed or a file could not be
            stat-ed while waiting for it to arrive on disk.
    """
    # Only GET (tape-to-disk) operations need staging
    if self.archive.d2t:
        return
    count = 0
    limit = 50  # max files per prepare request
    oper = 'prepare'
    self.set_status("prepare2get")
    t0 = time.time()
    lpaths = []
    status = True
    metahandler = MetaHandler()
    for fentry in self.archive.files():
        # Find error checkpoint if not already found
        if err_entry and not found_checkpoint:
            if fentry != err_entry:
                continue
            else:
                found_checkpoint = True
        count += 1
        surl, __ = self.archive.get_endpoints(fentry[1])
        # Strip the protocol/host part, keep only the path
        lpaths.append(surl[surl.rfind('//') + 1:])
        if len(lpaths) == limit:
            xrd_st = self.archive.fs_dst.prepare(lpaths, PrepareFlags.STAGE,
                                                 callback=metahandler.register(oper, surl))
            if not xrd_st.ok:
                # Drain outstanding responses before bailing out
                __ = metahandler.wait(oper)
                err_msg = "Failed prepare2get for path={0}".format(surl)
                self.logger.error(err_msg)
                raise IOError(err_msg)
            # Wait for batch to be executed
            del lpaths[:]
            status = status and metahandler.wait(oper)
            self.logger.debug(("Prepare2get done count={0}/{1}"
                               "").format(count, self.archive.header['num_files']))
            if not status:
                break
    # Send the remaining requests
    if lpaths and status:
        xrd_st = self.archive.fs_dst.prepare(lpaths, PrepareFlags.STAGE,
                                             callback=metahandler.register(oper, surl))
        if not xrd_st.ok:
            __ = metahandler.wait(oper)
            err_msg = "Failed prepare2get"
            self.logger.error(err_msg)
            raise IOError(err_msg)
        # Wait for batch to be executed
        del lpaths[:]
        status = status and metahandler.wait(oper)
    if status:
        t1 = time.time()
        self.logger.info("TIMING_prepare2get={0} sec".format(t1 - t0))
    else:
        err_msg = "Failed prepare2get"
        self.logger.error(err_msg)
        raise IOError(err_msg)
    # Wait for all the files to be on disk
    for fentry in self.archive.files():
        surl, __ = self.archive.get_endpoints(fentry[1])
        url = client.URL(surl)
        while True:
            st_stat, resp_stat = self.archive.fs_dst.stat(url.path)
            if not st_stat.ok:
                err_msg = "Error stat entry={0}".format(surl)
                self.logger.error(err_msg)
                # Bug fix: propagate the message instead of raising a
                # bare IOError() - consistent with every other raise here
                raise IOError(err_msg)
            # Check if file is on disk
            if resp_stat.flags & StatInfoFlags.OFFLINE:
                self.logger.info("Sleep 5 seconds, file not on disk entry={0}".format(surl))
                sleep(5)
            else:
                break
    self.logger.info("Finished prepare2get, all files are on disk")
def evict_disk_cache(self):
    """ Send a prepare evict request to the CTA so that the files are
    removed from the disk cache of the tape system. Failures are only
    logged (best-effort), never raised.
    """
    batch_size = 100
    timeout = 10
    batch = []
    # @todo(esindril) use the XRootD provided flag once this is
    # available in the Python interface
    xrd_prepare_evict_flag = 0x000100000000

    def _flush_batch(paths, dst):
        # Issue one prepare(evict) call for the accumulated batch of paths
        fs = self.archive.get_fs(dst)
        prep_stat, __ = fs.prepare(paths, xrd_prepare_evict_flag, 0, timeout)
        if not prep_stat.ok:
            self.logger.warning("Failed prepare evict for batch")

    dst = None
    for fentry in self.archive.files():
        __, dst = self.archive.get_endpoints(fentry[1])
        url = client.URL(dst)
        batch.append(url.path)
        if len(batch) == batch_size:
            _flush_batch(batch, dst)
            batch.clear()
    # Flush the incomplete trailing batch, if any
    if len(batch) != 0:
        _flush_batch(batch, dst)
        batch.clear()
    self.logger.info("Finished sending all the prepare evict requests")
def wait_on_tape(self):
    """ Check and wait that all the files are on tape, which in our case
    means checking the "m" bit. If a file is not on tape then suspend the
    current thread for a period of 5 to 60 seconds but abort if the file
    fails to be archived on tape within ARCHIVE_MAX_TIMEOUT seconds.

    Raises:
        IOError: Stat of an entry on the destination failed.
        NotOnTapeException: An entry was not archived within the maximum
            timeout - signals the caller to re-try the transfer.
    """
    max_timeout_per_entry = int(self.config.ARCHIVE_MAX_TIMEOUT)
    min_timeout, max_timeout = 5, 60
    for fentry in self.archive.files():
        start_ts = time.time()
        __, dst = self.archive.get_endpoints(fentry[1])
        url = client.URL(dst)
        file_on_tape = False
        while not file_on_tape:
            st_stat, resp_stat = self.archive.fs_dst.stat(url.path)
            if not st_stat.ok:
                err_msg = "Error stat entry={0}".format(dst)
                self.logger.error(err_msg)
                raise IOError()
            # Check file is on tape: non-empty but BACKUP_EXISTS not yet set
            if resp_stat.size != 0 and not (resp_stat.flags & StatInfoFlags.BACKUP_EXISTS):
                self.logger.debug("File {0} is not yet on tape".format(dst))
                # Randomized sleep to spread out the polling load
                timeout = randrange(min_timeout, max_timeout)
                self.logger.info("Going to sleep for {0} seconds".format(timeout))
                sleep(timeout)
                if time.time() - start_ts > max_timeout_per_entry:
                    self.logger.notice("Entry not archived within the maximum timeout."
                                       " entry={0} archive_max_timeout={1}".format(
                                           fentry[1], max_timeout_per_entry))
                    # break leaves file_on_tape False -> retry exception below
                    break
            else:
                file_on_tape = True
        else:
            # while-else: runs only when the loop exits without break, i.e.
            # the file was confirmed on tape (redundant safety assignment)
            file_on_tape = True
        if not file_on_tape:
            # Throw exception to re-try the failed transfer
            raise NotOnTapeException()
def backup_prepare(self):
    """ Prepare requested backup operation: fetch the backup file locally,
    build the ArchiveFile object and (unless forced) validate the
    destination directory.

    Raises:
        IOError: Failed to transfer the backup file, failed to stat the
            backup destination, or the destination has the wrong mode.
    """
    # Copy backup file from EOS to the local disk
    self.logger.info(("Prepare backup copy from {0} to {1}"
                      "").format(self.efile_full, self.tx_file))
    eos_fs = client.FileSystem(self.efile_full)
    st, _ = eos_fs.copy((self.efile_full + "?eos.ruid=0&eos.rgid=0"),
                        self.tx_file, True)
    if not st.ok:
        err_msg = ("Failed to copy backup file={0} to local disk at={1} err_msg={2}"
                   "").format(self.efile_full, self.tx_file, st.message)
        self.logger.error(err_msg)
        raise IOError(err_msg)
    # Create the ArchiveFile object for the backup which is similar to a
    # tape to disk transfer
    self.archive = ArchiveFile(self.tx_file, False)
    # Check that the destination directory exists and has mode 777, if
    # forced then skip checks
    if not self.force:
        surl = self.archive.header['dst']
        url = client.URL(surl)
        fs = self.archive.get_fs(surl)
        # Bug fix: original passed a tuple "(url.path, + "?...")" (unary
        # plus on a str raises TypeError at runtime) - concatenate instead
        st_stat, resp_stat = fs.stat(url.path + "?eos.ruid=0&eos.rgid=0")
        # Bug fix: original raised on a SUCCESSFUL stat; error out only
        # when the stat actually failed
        if not st_stat.ok:
            err_msg = ("Failed to stat backup destination url={0}"
                       "").format(surl)
            self.logger.error(err_msg)
            raise IOError(err_msg)
        if resp_stat.flags != (client.StatInfoFlags.IS_READABLE |
                               client.StatInfoFlags.IS_WRITABLE):
            err_msg = ("Backup destination url={0} must have mode 777").format(surl)
            self.logger.error(err_msg)
            raise IOError(err_msg)
def do_backup(self):
    """ Perform a backup operation using the provided backup file: take
    ownership of the backup file, recreate the directory tree, copy the
    files, apply metadata, verify and finally clean up.
    """
    start_ts = time.time()
    # Root owns the .sys.b#.backup.file
    fs = client.FileSystem(self.efile_full)
    efile_url = client.URL(self.efile_full)
    chown_arg = ''.join([efile_url.path,
                        "?eos.ruid=0&eos.rgid=0&mgm.pcmd=chown&uid=0&gid=0"])
    xrd_st, __ = fs.query(QueryCode.OPAQUEFILE, chown_arg)
    if not xrd_st.ok:
        err_msg = "Failed setting ownership of the backup file: {0}".format(self.efile_full)
        self.logger.error(err_msg)
        raise IOError(err_msg)
    # Recreate the directory hierarchy at the destination
    for indx_dir, dentry in enumerate(self.archive.dirs(), start=1):
        self.archive.mkdir(dentry)
        self.set_status("create dir {0}/{1}".format(
            indx_dir, self.archive.header['num_dirs']))
    # Copy files and set metadata information
    self.copy_files()
    self.update_file_access()
    self.set_status("verifying")
    check_ok, lst_failed = self.archive.verify(True)
    self.backup_write_status(lst_failed, check_ok)
    self.set_status("cleaning")
    self.logger.info("TIMING_transfer={0} sec".format(time.time() - start_ts))
    self.backup_tx_clean()
def backup_write_status(self, lst_failed, check_ok):
    """ Create the backup status file holding the list of entries that
    failed to transfer. The file name encodes the overall outcome.

    Args:
        lst_failed (list): List of failed file transfers
        check_ok (boolean): True if verification successful, otherwise
            false
    """
    if check_ok:
        self.logger.info("Backup successful - no errors detected")
        fn_status = ''.join([self.efile_root, ".sys.b#.backup.done?eos.ruid=0&eos.rgid=0"])
    else:
        self.logger.error("Failed verification for {0} entries".format(len(lst_failed)))
        fn_status = ''.join([self.efile_root, ".sys.b#.backup.err.", str(len(lst_failed)),
                             "?eos.ruid=0&eos.rgid=0"])
    # Write one line per failed entry into the status file
    with client.File() as f:
        f.open(fn_status, OpenFlags.UPDATE | OpenFlags.DELETE)
        offset = 0
        for entry in lst_failed:
            line = "Failed entry={0}\n".format(entry)
            f.write(line, offset, len(line))
            offset += len(line)
================================================
FILE: archive/eosarch/utils.py
================================================
# ------------------------------------------------------------------------------
# File: utils.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
"""Module containing helper function for the EOS archiver daemon."""
from __future__ import unicode_literals
import logging
from XRootD import client
from XRootD.client.flags import OpenFlags
logger = logging.getLogger("transfer")
def seal_path(path, seal_dict=None):
    """ Seal a path by replacing the key characters in the dictionary with
    their values so that EOS is happy.

    Args:
        path (str): Path to be sealed
        seal_dict (dict): Seal dictionary; defaults to {'&': "#AND#"}

    Returns:
        The path transformed using the dictionary mapping.
    """
    # Resolve the default at call time instead of using a shared mutable
    # default argument (evaluated only once at definition time)
    if seal_dict is None:
        seal_dict = {'&': "#AND#"}
    for key, val in seal_dict.items():
        path = path.replace(key, val)
    return path
def unseal_path(path, seal_dict=None):
    """ Unseal a path by replacing the key characters in the dictionary with
    their values so that we are happy.

    Args:
        path (str): Path to be unsealed
        seal_dict (dict): Unseal dictionary; defaults to {"#AND#": '&'}

    Returns:
        The path transformed using the dictionary mapping.
    """
    # Resolve the default at call time instead of using a shared mutable
    # default argument (evaluated only once at definition time)
    if seal_dict is None:
        seal_dict = {"#AND#": '&'}
    for key, val in seal_dict.items():
        path = path.replace(key, val)
    return path
def is_version_file(path):
    """ Tell whether the given relative path refers to an EOS version file,
    i.e. whether its name carries the ".sys.v#." prefix.

    Args:
        path (string): Relative path

    Returns:
        True if this is a version file, otherwise false.
    """
    marker = ".sys.v#."
    return path.startswith(marker) or ('/' + marker) in path
def is_atomic_version_file(path):
    """ Tell whether the given relative path refers to an EOS atomic version
    file, i.e. whether its name carries the ".sys.a#.v#." prefix.

    Args:
        path (string): Relative path

    Returns:
        True if this is an atomic version file, otherwise false.
    """
    marker = ".sys.a#.v#."
    return path.startswith(marker) or ('/' + marker) in path
def exec_cmd(cmd):
    """ Execute an EOS /proc/user/ command.

    Args:
        cmd (str): Command to execute.

    Returns:
        Tuple containing the following elements: (status, stdout, stderr).
        Status is a boolean value while the rest are strings. If data needs
        to be returned then it's put in stdout and any error messages are
        in stderr.
    """
    logger.debug("Execute: {0}".format(cmd))
    status, retc, stdout, stderr = False, "0", "", ""
    # Execute the command as root if role not already set
    if cmd.find("eos.ruid=") == -1:
        if cmd.find('?') == -1:
            cmd += "?eos.ruid=0&eos.rgid=0"
        else:
            cmd += "&eos.ruid=0&eos.rgid=0"
    with client.File() as f:
        st, __ = f.open(cmd, OpenFlags.READ)
        if st.ok:
            # Read the whole response in 4KB chunks
            data = ""
            off, sz = 0, 4096
            st, chunk = f.read(off, sz)
            if st.ok:
                while st.ok and len(chunk):
                    off += len(chunk)
                    try:
                        data += chunk.decode("utf-8")
                    except UnicodeDecodeError:
                        # Bug fix: the original bare "except" swallowed every
                        # exception and printed debug garbage to stdout. Log
                        # the decode failure and skip the offending chunk.
                        logger.error("Unable to decode response chunk as utf-8")
                    st, chunk = f.read(off, sz)
                # Response is a &-separated list of key=value pairs
                lpairs = data.split('&')
                for elem in lpairs:
                    if "mgm.proc.retc=" in elem:
                        retc = elem[(elem.index('=') + 1):].strip()
                        status = (retc == "0")
                    elif "mgm.proc.stdout=" in elem:
                        stdout = elem[(elem.index('=') + 1):].strip()
                        stdout = unseal_path(stdout)
                    elif "mgm.proc.stderr=" in elem:
                        stderr = elem[(elem.index('=') + 1):].strip()
                        stderr = unseal_path(stderr)
            else:
                stderr = "error reading response for command: {0}".format(cmd)
        else:
            stderr = "error sending command: {0}".format(cmd)
    # logger.debug("Return command: {0}".format((status, stdout, stderr)))
    return (status, stdout, stderr)
def get_entry_info(url, rel_path, tags, is_dir):
    """ Get file/directory metadata information from EOS.

    Args:
        url (XRootD.URL): Full URL to EOS location.
        rel_path (str): Entry's relative path as saved in the archive file.
        tags (list): List of tags to look for in the fileinfo result.
        is_dir (bool): If True entry is a directory, otherwise a file.

    Returns:
        A list containing the info corresponding to the tags supplied in
        the args.

    Raises:
        IOError: Fileinfo request can not be submitted.
        AttributeError: Not all expected tags are provided.
        KeyError: Extended attribute value is not present.
    """
    dinfo = []
    finfo = ''.join([url.protocol, "://", url.hostid, "//proc/user/?",
                     "mgm.cmd=fileinfo&mgm.path=", seal_path(url.path),
                     "&mgm.file.info.option=-m"])
    (status, stdout, stderr) = exec_cmd(finfo)
    if not status:
        err_msg = ("Path={0} failed fileinfo request, msg={1}").format(
            url.path, stderr)
        logger.error(err_msg)
        raise IOError(err_msg)
    # Extract the path by using the keylength.file value which represents the
    # size of the path. This is because the path can contain spaces and would
    # otherwise be split at the wrong place.
    size_pair, file_pair, tail = stdout.split(' ', 2)
    sz_key, sz_val = size_pair.split('=', 1)
    file_key, file_val = file_pair.split('=', 1)
    if sz_key == "keylength.file" and file_key == "file" :
        path = file_val
        path_size = int(sz_val)
        # Re-assemble a space-containing path: keep consuming tokens from
        # the tail until the utf-8 encoded length matches keylength.file
        while path_size > len(path.encode("utf-8")):
            path_token, tail = tail.split(' ', 1)
            path += ' '
            path += path_token
    else:
        # NOTE(review): format string has no placeholder, so url.path is
        # silently dropped from this error message - confirm intent
        err_msg = ("Fileinfo response does not start with keylength.file "
                   "for path").format(url.path)
        logger.error(err_msg)
        raise IOError(err_msg)
    # For the rest we don't expect any surprises, they should be key=val pairs
    lpairs = tail.split(' ')
    it_list = iter(lpairs)
    dict_info, dict_attr = {}, {}
    # Parse output of fileinfo -m keeping only the required keys
    for elem in it_list:
        if '=' not in elem:
            continue
        key, value = elem.split('=', 1)
        if len(value) == 0:
            continue
        if key in tags:
            dict_info[key] = value
        elif key == "xattrn" and is_dir:
            # Extended attributes arrive as consecutive tokens:
            # "xattrn=<name> xattrv=<value>" - consume the pair together
            xkey, xval = next(it_list).split('=', 1)
            if xkey != "xattrv":
                err_msg = ("Dir={0} no value for xattrn={1}").format(
                    url.path, value)
                logger.error(err_msg)
                raise KeyError(err_msg)
            else:
                dict_attr[value] = xval
    # For directories add also the xattr dictionary
    if is_dir and "attr" in tags:
        dict_info["attr"] = dict_attr
    if len(dict_info) == len(tags):
        # Dirs must end with '/' just as the output of EOS fileinfo -d
        tentry = 'd' if is_dir else 'f'
        dinfo.extend([tentry, rel_path])
        for tag in tags:
            dinfo.append(dict_info[tag])
    else:
        err_msg = ("Path={0}, not all expected tags found").format(url.path)
        logger.error(err_msg)
        raise AttributeError(err_msg)
    return dinfo
def set_dir_info(surl, dict_dinfo, excl_xattr):
    """ Set directory metadata information in EOS: ownership, mode and
    extended attributes.

    Args:
        surl (string): Full URL of directory
        dict_dinfo (dict): Dictionary containing meta-data information
        excl_xattr (list): List of excluded extended attributes

    Raises:
        IOError: Metadata operation failed.
    """
    url = client.URL(surl)
    proc_base = ''.join([url.protocol, "://", url.hostid, "//proc/user/?"])
    sealed = seal_path(url.path)
    # Change ownership of the directory
    cmd_chown = ''.join([proc_base, "mgm.cmd=chown&mgm.path=", sealed,
                         "&mgm.chown.owner=", dict_dinfo['uid'], ":",
                         dict_dinfo['gid']])
    status, __, stderr = exec_cmd(cmd_chown)
    if not status:
        err_msg = "Dir={0}, error doing chown, msg={1}".format(url.path, stderr)
        logger.error(err_msg)
        raise IOError(err_msg)
    # Set permission on the directory
    cmd_chmod = ''.join([proc_base, "mgm.cmd=chmod&mgm.path=", sealed,
                         "&mgm.chmod.mode=", dict_dinfo['mode']])
    status, __, stderr = exec_cmd(cmd_chmod)
    if not status:
        err_msg = "Dir={0}, error doing chmod, msg={1}".format(url.path, stderr)
        logger.error(err_msg)
        raise IOError(err_msg)
    # Deal with extended attributes. If all are excluded then don't touch them.
    if "*" in excl_xattr:
        return
    # Get all the current xattrs
    cmd_lsattr = ''.join([proc_base, "mgm.cmd=attr&mgm.subcmd=ls&mgm.path=",
                          sealed])
    status, stdout, stderr = exec_cmd(cmd_lsattr)
    if not status:
        err_msg = "Dir={0}, error listing xattrs, msg ={1}".format(
            url.path, stderr)
        logger.error(err_msg)
        raise IOError(err_msg)
    current_attrs = [line.split('=', 1)[0] for line in stdout.splitlines()]
    # Remove every existing attribute that is not excluded
    for attr in current_attrs:
        if attr in excl_xattr:
            continue
        cmd_rmattr = ''.join([proc_base, "mgm.cmd=attr&mgm.subcmd=rm&mgm.attr.key=",
                              attr, "&mgm.path=", sealed])
        status, __, stderr = exec_cmd(cmd_rmattr)
        if not status:
            err_msg = ("Dir={0} error while removing attr={1}, msg={2}"
                       "").format(url.path, attr, stderr)
            logger.error(err_msg)
            raise IOError(err_msg)
    # Apply the expected extended attributes, skipping excluded/empty ones
    for key, val in dict_dinfo['attr'].items():
        if key in excl_xattr or len(val) == 0:
            continue
        cmd_setattr = ''.join([proc_base, "mgm.cmd=attr&mgm.subcmd=set&mgm.attr.key=",
                               key, "&mgm.attr.value=", val, "&mgm.path=", sealed])
        status, __, stderr = exec_cmd(cmd_setattr)
        if not status:
            err_msg = "Dir={0}, error setting attr={1}, msg={2}".format(
                url.path, key, stderr)
            logger.error(err_msg)
            raise IOError(err_msg)
================================================
FILE: archive/eosarch_reconstruct.py
================================================
#!/usr/bin/python3
# ------------------------------------------------------------------------------
# File: eosarch_reconstruct.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
""" This tool can be used to reconstruct an archive file starting from the data
which is actually saved on tape (CASTOR). The tape systems needs to have an
XRootD interface. The way it works it that the archive file is constructed
locally and then is uploaded to the specified EOS root directory which must not
exist previously. The archive file is copied to EOS using the filename
.archive.purge.done so that the user is then able to get the data from the
tape system back into EOS. The UID which is provided when launching the command
is given permission to execute archive operations on the corresponding EOS
directory.
"""
from __future__ import print_function
import sys
import os
import ast
import errno
import stat
import time
import argparse
import tempfile
from eosarch.utils import exec_cmd, set_dir_info
try:
from XRootD import client
from XRootD.client.flags import DirListFlags, StatInfoFlags, QueryCode
except ImportError as ierr:
print("Missing xrootd-python package", file=sys.stderr)
class EosAccessException(Exception):
    """ Raised when the current user lacks the full sudo rights in EOS
    required to perform the operations of the archiving reconstruct.
    """
class TapeAccessException(Exception):
    """ Raised when information cannot be retrieved from the tape system
    on behalf of the current user.
    """
class ArchReconstruct(object):
    """ Class responsible for reconstructing the archive file from an
    already existing directory subtree on tape.
    """
    def __init__(self, surl, durl, args):
        """ Initialize the ArchReconstruct object

        Args:
            surl (XRootD.URL): URL to tape backend (CASTOR)
            durl (XRootD.URL): URL to disk destination (EOS)
            args (Namespace): Namespace object containing at least the following
                attributes:
                uid (string): UID of archive owner in numeric format
                gid (string): GID of archive owner in numeric format
                svc_class (string): Service class used for retrieving the
                    archived data
                skip_no_xs (bool): Skip files that don't have a checksum
        """
        self.src_url = surl
        self.dst_url = durl
        self.uid, self.gid = args.uid, args.gid
        self.svc_class = args.svc_class
        self.skip_no_xs = args.skip_no_xs
        # Scratch files: file and directory records are collected separately
        # and merged behind the header at the end of breadth_first()
        self.ffiles = tempfile.TemporaryFile(mode='w+')
        self.fdirs = tempfile.TemporaryFile(mode='w+')
        # Named and kept on disk (delete=False) so it can be uploaded later
        self.farchive = tempfile.NamedTemporaryFile(mode='w+', delete=False)
        print("Temp. archive file saved in: {0}".format(self.farchive.name),
              file=sys.stdout)

    def __del__(self):
        """ Destructor - make sure we close the temporary files
        """
        self.ffiles.close()
        self.fdirs.close()
        self.farchive.close()

    def breadth_first(self):
        """ Traverse the filesystem subtree using breadth-first search and
        collect the directory information and file information into separate
        files which will be merged in the end.

        Raises:
            TapeAccessException: Directory listing or checksum query on the
                tape backend failed, or the tree already holds an archive.
        """
        # Dir format: type, rel_path, uid, gid, mode, attr
        dir_meta = "[\"uid\", \"gid\", \"mode\", \"attr\"]"
        dir_format = "[\"d\", \"{0}\", \"{1}\", \"{2}\", \"{3}\", {4}]"
        # File format: type, rel_path, size, mtime, ctime, uid, gid, mode, xstype, xs
        # Fake mtime and ctime subsecond precision (".0" suffix below)
        file_meta = ("[\"size\", \"mtime\", \"ctime\", \"uid\", \"gid\", \"mode\", "
                     "\"xstype\", \"xs\"]")
        file_format = ("[\"f\", \"{0}\", \"{1}\", \"{2}.0\", \"{3}.0\", \"{4}\", "
                       "\"{5}\", \"{6}\", \"{7}\", \"{8}\"]")
        # Attrs for 2 replica layout in EOS with current user the only one
        # allowed to trigger archiving operations
        replica_attr = ("{{\"sys.acl\": \"u:{0}:a,z:i\", "
                        "\"sys.forced.blockchecksum\": \"crc32c\", "
                        "\"sys.forced.blocksize\": \"4k\", "
                        "\"sys.forced.checksum\": \"adler\", "
                        "\"sys.forced.layout\": \"replica\", "
                        "\"sys.forced.nstripes\": \"2\", "
                        "\"sys.forced.space\": \"default\"}}").format(self.uid)
        num_files, num_dirs = 0, 0
        fs = client.FileSystem(str(self.src_url))
        # Add root directory which is a bit special and set its metadata
        # Dir mode is 42755 and file mode is 0644
        dir_mode = oct(stat.S_IFDIR | stat.S_ISGID | stat.S_IRWXU
                       | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
        # remove leading 0 used for octal format
        # NOTE(review): under Python 3 oct() yields "0o42755" so [1:] leaves
        # "o42755", not "42755" as under Python 2 - verify on Python 3
        dir_mode = dir_mode[1:]
        file_mode = oct(stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
        print(dir_format.format("./", self.uid, self.gid, dir_mode,
                                replica_attr), file=self.fdirs)
        dict_attr = ast.literal_eval(replica_attr)
        dict_dinfo = dict(zip(["uid", "gid", "mode", "attr"],
                              [self.uid, self.gid, dir_mode, dict_attr]))
        # Apply the root directory metadata directly on the EOS destination
        set_dir_info(str(self.dst_url), dict_dinfo, list())
        root_path = self.src_url.path
        lst_dirs = [root_path]
        # BFS over the tape subtree: pop a directory, list it, queue subdirs
        while lst_dirs:
            path = lst_dirs.pop(0)
            st, listing = fs.dirlist(path, DirListFlags.STAT)
            if not st.ok:
                # NOTE(review): message reports the root path, not the
                # failing "path" - confirm intent
                msg = "Failed to list dir={0}".format(self.src_url.path)
                raise TapeAccessException(msg)
            for elem in listing:
                if elem.name == ".archive.init":
                    msg = ("Trying to reconstruct an already existing archive "
                           "in directory: {0}").format(path)
                    raise TapeAccessException(msg)
                if elem.statinfo.flags & StatInfoFlags.IS_DIR:
                    num_dirs += 1
                    full_path = ''.join([path, elem.name, '/'])
                    rel_path = full_path.replace(root_path, "")
                    lst_dirs.append(full_path)
                    print(dir_format.format(rel_path, self.uid, self.gid, dir_mode,
                                            replica_attr), file=self.fdirs)
                else:
                    full_path = ''.join([path, elem.name])
                    rel_path = full_path.replace(root_path, "")
                    st, xs_resp = fs.query(QueryCode.CHECKSUM, full_path)
                    if not st.ok:
                        # If requested then skip the files that don't have a checksum
                        if self.skip_no_xs:
                            continue
                        msg = "File={0} failed xs query".format(full_path)
                        raise TapeAccessException(msg)
                    num_files += 1
                    # Result has an annoying \x00 character at the end and it
                    # contains the xs type (adler32) and the xs value
                    resp = xs_resp.strip('\x00\0\n ').split()
                    # If checksum value is not 8 char long then we need padding
                    if len(resp[1]) != 8 :
                        resp[1] = "{0:0>8}".format(resp[1])
                    if resp[0] != "adler32":
                        msg = ("Unknown checksum type={0} from tape system"
                               "".format(resp[0]))
                        raise TapeAccessException(msg)
                    # modtime is used for both mtime and ctime (tape backend
                    # provides only a single timestamp)
                    print(file_format.format(rel_path, elem.statinfo.size,
                                             elem.statinfo.modtime,
                                             elem.statinfo.modtime,
                                             self.uid, self.gid, file_mode,
                                             "adler", resp[1]),
                          file=self.ffiles)
        # Write archive file header
        header_format = ("{{\"src\": \"{0}\", "
                         "\"dst\": \"{1}\", "
                         "\"svc_class\": \"{2}\", "
                         "\"dir_meta\": {3}, "
                         "\"file_meta\": {4}, "
                         "\"num_dirs\": {5}, "
                         "\"num_files\": {6}, "
                         "\"uid\": \"{7}\", "
                         "\"gid\": \"{8}\", "
                         "\"timestamp\": \"{9}\"}}")
        print(header_format.format(str(self.dst_url), str(self.src_url),
                                   self.svc_class, dir_meta, file_meta,
                                   num_dirs, num_files, self.uid,
                                   self.gid, time.time()),
              file=self.farchive, end="\n")
        # Rewind to the beginning of the tmp files
        self.fdirs.seek(0)
        self.ffiles.seek(0)
        # Write directories
        for line in self.fdirs:
            print(line, file=self.farchive, end="")
        # Write files
        for line in self.ffiles:
            print(line, file=self.farchive, end="")
        self.farchive.close()

    def upload_archive(self):
        """ Upload archive file to EOS directory. Note that we save it under
        the name .archive.purge.done since purging is the only possible
        operation when we do such a reconstruct.

        Raises:
            EosAccessException: Preparing or running the copy job failed.
        """
        cp = client.CopyProcess()
        dst = ''.join([str(self.dst_url), ".archive.purge.done?eos.ruid=0&eos.rgid=0"])
        cp.add_job(self.farchive.name, dst, force=True)
        status = cp.prepare()
        if not status.ok:
            msg = "Failed while preparing to upload archive file to EOS"
            raise EosAccessException(msg)
        status = cp.run()
        if not status.ok:
            msg = "Failed while copying the archive file to EOS"
            raise EosAccessException(msg)
        else:
            # Delete local archive file (best-effort)
            try:
                os.remove(self.farchive.name)
            except OSError as __:
                pass
def check_eos_access(url):
    """ Check that the current user executing the program is mapped as root
    in EOS, otherwise he will not be able to set all the necessary
    attributes for the newly built archive. Make sure also that the root
    destination does not exist already, then create it.

    Args:
        url (XRootD.URL): EOS URL to the destination path

    Raises:
        EosAccessException: whoami failed or could not be parsed, the user
            is not root in EOS, the destination already exists, or the
            destination directory could not be created.
    """
    fwhoami = ''.join([url.protocol, "://", url.hostid, "//proc/user/?mgm.cmd=whoami"])
    (status, out, __) = exec_cmd(fwhoami)
    if not status:
        msg = "Failed to execute EOS whoami command"
        raise EosAccessException(msg)
    # Extract the uid and gid from the response.
    # Bug fix: str.strip returns a new string - the original discarded it
    out = out.strip("\0\n ")
    lst = out.split(' ')
    # Bug fix: initialize so a response without uid=/gid= tokens raises a
    # clean parse error instead of a NameError
    uid, gid = None, None
    try:
        for token in lst:
            if token.startswith("uid="):
                uid = int(token[4:])
            elif token.startswith("gid="):
                gid = int(token[4:])
    except ValueError as __:
        msg = "Failed while parsing uid/gid response to EOS whoami command"
        raise EosAccessException(msg)
    if uid is None or gid is None:
        msg = "Failed while parsing uid/gid response to EOS whoami command"
        raise EosAccessException(msg)
    if uid != 0 or gid != 0:
        msg = "User {0} does not have full rights in EOS - aborting".format(os.getuid())
        raise EosAccessException(msg)
    # Check that root directory does not exist already
    fs = client.FileSystem(str(url))
    st, __ = fs.stat(url.path)
    if st.ok:
        msg = "EOS root directory already exists"
        raise EosAccessException(msg)
    fmkdir = ''.join([url.protocol, "://", url.hostid, "//proc/user/?mgm.cmd=mkdir&"
                      "mgm.path=", url.path])
    (status, __, __) = exec_cmd(fmkdir)
    if not status:
        msg = "Failed to create EOS directory: {0}".format(url.path)
        raise EosAccessException(msg)
def main():
    """ Main function: parse command line options, validate the source and
    destination URLs and drive the archive reconstruction.
    """
    parser = argparse.ArgumentParser(description="Tool used to create an archive "
                                     "file from an already existing archive such "
                                     "that the recall of the files can be done "
                                     "using the EOS archiving tool. The files are "
                                     "copied back to EOS using the 2replica layout.")
    parser.add_argument("-s", "--src", required=True,
                        help="XRootD URL to archive tape source (CASTOR location)")
    parser.add_argument("-d", "--dst", required=True,
                        help="XRootD URL to archive disk destination (EOS location)")
    parser.add_argument("-c", "--svc_class", default="default",
                        help="Service class used for getting the files from tape")
    parser.add_argument("-u", "--uid", default="0", help="User UID (numeric)")
    parser.add_argument("-g", "--gid", default="0", help="User GID (numeric)")
    parser.add_argument("-x", "--skip_no_xs", default=False, action="store_true",
                        help="Skip files that don't have a checksum")
    args = parser.parse_args()

    try:
        int(args.uid)
        int(args.gid)
    except ValueError as __:
        print("Error: UID/GID must be in numeric format", file=sys.stderr)
        sys.exit(errno.EINVAL)

    # Make sure the source and destination are directories
    # Fix: endswith() also handles an empty string without raising IndexError
    if not args.src.endswith('/'):
        args.src += '/'

    if not args.dst.endswith('/'):
        args.dst += '/'

    # Check the source and destination are valid XRootD URLs
    url_dst = client.URL(args.dst)
    url_src = client.URL(args.src)

    if not url_dst.is_valid() or not url_src.is_valid():
        print("Error: Destination/Source URL is not valid", file=sys.stderr)
        sys.exit(errno.EINVAL)

    # Refuse localhost aliases since the URLs are also used by remote parties
    avoid_local = ["localhost", "localhost4", "localhost6",
                   "localhost.localdomain", "localhost4.localdomain4",
                   "localhost6.localdomain6"]

    if url_dst.hostname in avoid_local or url_src.hostname in avoid_local:
        print("Please use FQDNs in the XRootD URLs", file=sys.stderr)
        sys.exit(errno.EINVAL)

    try:
        check_eos_access(url_dst)
    except EosAccessException as err:
        print("Error: {0}".format(str(err)), file=sys.stderr)
        sys.exit(errno.EPERM)

    archr = ArchReconstruct(url_src, url_dst, args)

    try:
        archr.breadth_first()
        archr.upload_archive()
    except (TapeAccessException, IOError) as err:
        print("Error: {0}".format(str(err)), file=sys.stderr)
        sys.exit(errno.EIO)


if __name__ == '__main__':
    main()
================================================
FILE: archive/eosarch_run.py
================================================
#!/usr/bin/python3
# ------------------------------------------------------------------------------
# File: eosarch_run.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
""" Script used for starting an archiving transfer in a subprocess which also
closes the open file descriptors such that there is no interference between
the processes using ZMQ.
"""
from __future__ import unicode_literals
from __future__ import print_function
import ast
import sys
import os
import logging
from errno import EIO, EINVAL
from hashlib import sha256
# Note: this is to be enabled only when we want to get the logging from the
# XrdCl - notice that this can grow very big, very fast. We also have to do
# this here before the XrdCl module gets initialised.
#os.environ['XRD_LOGLEVEL'] = "Debug"
#os.environ['XRD_LOGFILE'] = "/tmp/eosarch_xrdcl.log"
from eosarch import Transfer, NoErrorException, Configuration
try:
    config = Configuration()
except Exception as err:
    print("Configuration failed, error:{0}".format(err), file=sys.stderr)
    raise

# Set location for local transfer files
# NOTE(review): assumes config.DIR is already a dict here - eosarchived.py
# creates it explicitly before filling it; confirm Configuration provides it
for oper in [config.GET_OP, config.PUT_OP, config.PURGE_OP, config.DELETE_OP, config.BACKUP_OP]:
    path = config.EOS_ARCHIVE_DIR + oper + '/'
    config.DIR[oper] = path

# The single command line argument is the request dictionary in Python
# literal syntax (parsed safely via ast.literal_eval, not eval)
req_dict = ast.literal_eval(sys.argv[1])
src = req_dict['src']
# Extract the root directory from the URL: everything between the second
# "//" (end of the authority part) and the last '/'
pos = src.find("//", src.find("//") + 1) + 1
root_dir = src[pos : src.rfind('/') + 1]
# Per-transfer log file name derived from a digest of the root directory
uuid = sha256(root_dir.encode()).hexdigest()
log_file = config.DIR[req_dict['cmd']] + uuid + ".log"
config.start_logging("transfer", log_file, False)

try:
    tx = Transfer(req_dict, config)
except Exception as err:
    config.logger.exception(err)
    raise

try:
    tx.run()
except IOError as err:
    print("{0}".format(err), file=sys.stderr)
    tx.logger.exception(err)
    tx.tx_clean(False)
    sys.exit(EIO)
except NoErrorException:
    # Transfer finished successfully - clean up and keep exit code 0
    tx.tx_clean(True)
except Exception as err:
    print("{0}".format(err), file=sys.stderr)
    tx.logger.exception(err)
    tx.tx_clean(False)
    sys.exit(EINVAL)
================================================
FILE: archive/eosarchived.conf
================================================
# ------------------------------------------------------------------------------
# File: eosarchived.conf
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
# Log level can be one of the following (it uses the syslog convention):
# info, notice, warn/warning, err/error, emerg/panic, debug, crit/critical, alert
LOG_LEVEL=debug
# Max number of transfers that can run in parallel
MAX_TRANSFERS=10
# Max number of transfers to be performed by one thread
BATCH_SIZE=10
# Max number of threads used per transfer process
MAX_THREADS=5
# Max number of retries for a batch of jobs that have failed. This is used to
# protect against transient failures, so that the user doesn't have to babysit
# the entire transfer.
MAX_RETRIES=5
# Poll timeout in milliseconds - period after which the master requests on its
# own for an update from the workers if there are no requests in the mean time.
# This also has the role to join the worker processes which have finished
# in the mean time and print their returncode.
POLL_TIMEOUT=30000
# Join timeout in seconds for running threads inside a process
JOIN_TIMEOUT=1
# Maximum timeout value in seconds for a file entry to be migrated to tape.
# When this timeout expires the transfer process is retried. By default this
# is 86400 seconds (1 day).
#ARCHIVE_MAX_TIMEOUT=86400
================================================
FILE: archive/eosarchived.py
================================================
#!/usr/bin/python3
# ------------------------------------------------------------------------------
# File: eosarchived.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
"""Module running the eosarchiverd daemon which transfers files between EOS
and CASTOR.
"""
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import zmq
import stat
import subprocess
import ast
import logging
import time
import logging.handlers
from eosarch import ProcessInfo, Configuration
class Dispatcher(object):
    """ Dispatcher daemon responsible for receiving requests from the clients
    and then spawning the proper executing process for archiving operations

    Attributes:
        procs (dict): Dictionary containing the currently running processes
    """
    def __init__(self, config):
        self.config = config
        self.logger = logging.getLogger("dispatcher")
        self.procs = {}    # uuid -> ProcessInfo for running transfer processes
        self.pending = {}  # uuid -> ProcessInfo for queued (not yet started) transfers
        self.backend_req, self.backend_pub, self.backend_poller = None, None, None

    def run(self):
        """ Server entry point which is responsible for spawning worker processes
        that do the actual transfers (put/get).
        """
        # Set the triggers for different types of commands
        trigger = {self.config.PUT_OP: self.start_transfer,
                   self.config.GET_OP: self.start_transfer,
                   self.config.DELETE_OP: self.start_transfer,
                   self.config.PURGE_OP: self.start_transfer,
                   self.config.BACKUP_OP: self.start_transfer,
                   self.config.TX_OP: self.do_show_transfers,
                   self.config.KILL_OP: self.do_kill,
                   self.config.STATS: self.get_stats}
        ctx = zmq.Context.instance()
        self.logger.info("Started dispatcher process ...")
        # Socket used for communication with EOS MGM
        frontend = ctx.socket(zmq.REP)
        addr = "ipc://" + self.config.FRONTEND_IPC
        frontend.bind(addr)
        # Socket used for communication with worker processes
        self.backend_req = ctx.socket(zmq.ROUTER)
        addr = "ipc://" + self.config.BACKEND_REQ_IPC
        self.backend_req.bind(addr)
        self.backend_pub = ctx.socket(zmq.PUB)
        addr = "ipc://" + self.config.BACKEND_PUB_IPC
        self.backend_pub.bind(addr)
        self.backend_poller = zmq.Poller()
        self.backend_poller.register(self.backend_req, zmq.POLLIN)
        mgm_poller = zmq.Poller()
        mgm_poller.register(frontend, zmq.POLLIN)
        time.sleep(1)
        # Attach orphan processes which may be running before starting the daemon
        self.get_orphans()

        while True:
            events = dict(mgm_poller.poll(self.config.POLL_TIMEOUT))
            self.update_status()

            if events and events.get(frontend) == zmq.POLLIN:
                try:
                    req_json = frontend.recv_json()
                except zmq.ZMQError as err:
                    if err.errno == zmq.ETERM:
                        break  # shutting down, exit
                    else:
                        raise
                except ValueError as err:
                    self.logger.error("Command is not in JSON format")
                    # Fix: py3 pyzmq rejects unicode in send() - use send_string
                    frontend.send_string("ERROR error:command not in JSON format")
                    continue

                self.logger.debug("Received command: {0}".format(req_json))

                try:
                    reply = trigger[req_json['cmd']](req_json)
                except KeyError as err:
                    self.logger.error("Unknown command type: {0}".format(req_json['cmd']))
                    # Fix: the previous re-raise made this reply dead code and
                    # crashed the daemon on any unsupported command
                    reply = "ERROR error: operation not supported"

                frontend.send_string(reply)

    def get_orphans(self):
        """ Get orphan transfer processes from previous runs of the daemon
        """
        self.logger.info("Get orphans")
        tries = 0
        num = self.num_processes()

        # Retry until all orphan processes reported their status (max 10 tries)
        while len(self.procs) != num and tries < 10:
            tries += 1
            self.procs.clear()
            num = self.num_processes()
            self.backend_pub.send_multipart([b"[MASTER]", b"{'cmd': 'orphan_status'}"])

            while True:
                events = dict(self.backend_poller.poll(1000))

                if events and events.get(self.backend_req) == zmq.POLLIN:
                    [__, resp] = self.backend_req.recv_multipart()
                    self.logger.info("Received response: {0}".format(resp))
                    # Convert response to python dictionary
                    dict_resp = ast.literal_eval(resp.decode())

                    if not isinstance(dict_resp, dict):
                        err_msg = "Response={0} is not a dictionary".format(resp)
                        self.logger.error(err_msg)
                        continue

                    pinfo = ProcessInfo(None)
                    pinfo.update(dict_resp)

                    if pinfo.uuid not in self.procs:
                        self.procs[pinfo.uuid] = pinfo
                else:  # TIMEOUT
                    self.logger.info("Get orphans status timeout")
                    break

            self.logger.debug(("Try={0}, got {1}/{2} orphan process responses"
                               "").format(tries, len(self.procs), num))

    def num_processes(self):
        """ Get the number of running archive processes on the current system by
        executing the ps command

        Returns:
            Number of running processes

        Raises:
            ValueError in case the output of ps is not a valid pid number
        """
        pid = os.getpid()
        # TODO: make the resolution of the eosarch_run.py more elegant
        exec_fname = "eosarch_run.py"
        ps_proc = subprocess.Popen([("ps -eo pid,ppid,comm | egrep \"{0}\$\" | "
                                     "awk '{{print $1}}'").format(exec_fname)],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   shell=True)
        ps_out, __ = ps_proc.communicate()

        if len(ps_out) == 0:
            return 0

        # Fix: communicate() returns bytes in py3 - decode before strip/split
        ps_out = ps_out.decode().strip('\0\n')
        proc_lst = ps_out.split('\n')
        num = 0

        # Fix: done with an explicit loop so the offending entry is still in
        # scope when logging (comprehension variables don't leak in py3)
        for entry in proc_lst:
            try:
                if pid != int(entry):
                    num += 1
            except ValueError as __:
                self.logger.error("ps output entry={0} is not a valid pid value".format(entry))
                raise

        return num

    def update_status(self):
        """ Update the status of the processes
        """
        self.backend_pub.send_multipart([b"[MASTER]", b"{'cmd': 'status'}"])
        recv_uuid = []

        while len(recv_uuid) < len(self.procs):
            events = dict(self.backend_poller.poll(400))

            if events and events.get(self.backend_req) == zmq.POLLIN:
                [__, resp] = self.backend_req.recv_multipart()
                self.logger.debug("Received response: {0}".format(resp))
                # Convert response to python dictionary
                dict_resp = ast.literal_eval(resp.decode())

                if not isinstance(dict_resp, dict):
                    self.logger.error("Response is not a dictionary")
                    continue

                # Update the local info about the process
                try:
                    self.procs[dict_resp['uuid']].update(dict_resp)
                except KeyError as __:
                    err_msg = ("Unknown process response:{0}").format(dict_resp)
                    self.logger.error(err_msg)

                recv_uuid.append(dict_resp['uuid'])
            else:  # TIMEOUT
                self.logger.debug("Update status timeout")
                break

        # Check if processes that didn't respond are still alive
        unresp = [proc for (uuid, proc) in self.procs.items()
                  if uuid not in recv_uuid]

        for pinfo in unresp:
            if not pinfo.is_alive():
                del self.procs[pinfo.uuid]

        # Submit any pending transfers while the limit is not exceeded
        while len(self.procs) < self.config.MAX_TRANSFERS and self.pending:
            # Fix: dict.popitem() removes the most recently added entry; pop
            # the first key instead to actually take the oldest pending one
            oldest_uuid = next(iter(self.pending))
            pinfo = self.pending.pop(oldest_uuid)
            # Don't pipe stdout and stderr as we log all the output
            pinfo.proc = subprocess.Popen(['/usr/bin/eosarch_run.py',
                                           "{0}".format(pinfo.orig_req)],
                                          close_fds=True)
            pinfo.pid = pinfo.proc.pid
            self.procs[pinfo.uuid] = pinfo

    def start_transfer(self, req_json):
        """ Start new transfer

        Args:
            req_json (json): New transfer information which must include:
                {
                  cmd: get/put/delete/purge/backup,
                  src: full URL to archive file in EOS.
                  opt: retry | ''
                  uid: client uid
                  gid: client gid
                }

        Returns:
            A message which is sent to the EOS MGM informing about the status
            of the request.
        """
        self.logger.debug("Start transfer {0}".format(req_json))
        pinfo = ProcessInfo(req_json)
        self.logger.debug("Creating job={0}, path={1}".format(pinfo.uuid, pinfo.root_dir))

        if pinfo.uuid in self.procs:
            err_msg = "Job with same uuid={0} already exists".format(pinfo.uuid)
            self.logger.error(err_msg)
            return "ERROR error: job with same signature exists"

        if len(self.procs) >= self.config.MAX_TRANSFERS:
            self.logger.warning("Maximum number of concurrent transfers reached, "
                                "adding job={0} to the pending list".format(pinfo.uuid))
            self.pending[pinfo.uuid] = pinfo
            return "OK Id={0} added to the pending list".format(pinfo.uuid)

        # Don't pipe stdout and stderr as we log all the output
        pinfo.proc = subprocess.Popen(['/usr/bin/eosarch_run.py', "{0}".format(req_json)],
                                      close_fds=True)
        pinfo.pid = pinfo.proc.pid
        self.procs[pinfo.uuid] = pinfo
        return "OK Id={0}".format(pinfo.uuid)

    def do_show_transfers(self, req_json):
        """ Show ongoing transfers

        Args:
            req_json (JSON): Command in JSON format include:
                {
                  cmd: transfers,
                  opt: all/get/put/purge/delete/uuid,
                  uid: uid,
                  gid: gid
                }

        Returns:
            String with the result of the listing
        """
        msg = "OK "
        row_data, proc_list = [], []
        ls_type = req_json['opt']
        self.logger.debug("Show transfers type={0}".format(ls_type))

        if ls_type == "all":
            proc_list = [*self.procs.values(), *self.pending.values()]
        elif ls_type in self.procs:
            # ls_type is a transfer uuid
            proc_list.append(self.procs[ls_type])
        else:
            proc_list = [elem for elem in self.procs.values() if elem.op == ls_type]
            proc_list.extend([elem for elem in self.pending.values() if elem.op == ls_type])

        for proc in proc_list:
            line = ("date={0},uuid={1},path={2},op={3},status={4}".format(
                time.asctime(time.localtime(proc.timestamp)), proc.uuid,
                proc.orig_req['src'], proc.op, proc.status))
            msg = '\n'.join([msg, line])

        return msg

    def do_kill(self, req_json):
        """ Kill transfer.

        Args:
            req_json (JSON command): Arguments for kill command include:
                {
                  cmd: kill,
                  opt: uuid,
                  uid: uid,
                  gid: gid
                }
        """
        msg = "OK"
        job_uuid = req_json['opt']
        uid, gid = int(req_json['uid']), int(req_json['gid'])

        try:
            proc = self.procs[job_uuid]
        except KeyError as __:
            msg = "ERROR error: job not found"
            return msg

        # Allowed for root, the owner, or a member of the owning group
        if (uid == 0 or uid == proc.uid or
                (uid != proc.uid and gid == proc.gid)):
            self.logger.debug("Kill uuid={0} pid={1}".format(job_uuid, proc.pid))
            kill_proc = subprocess.Popen(['kill', '-SIGTERM', str(proc.pid)],
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
            _, err = kill_proc.communicate()

            if kill_proc.returncode:
                # Fix: communicate() returns bytes - decode before concatenating
                msg = "ERROR error:" + err.decode()
        else:
            self.logger.error(("User uid/gid={0}/{1} permission denied to kill job "
                               "with uid/gid={2}/{3}").format(uid, gid,
                                                              proc.uid, proc.gid))
            msg = "ERROR error: Permission denied - you are not owner of the job"

        # Fix: second placeholder was {0}, printing the pid twice instead of msg
        self.logger.debug("Kill pid={0}, msg={1}".format(proc.pid, msg))
        return msg

    def get_stats(self, req_json):
        """ Get archive daemon stats info.

        Args:
            req_json (JSON command): Arguments for stats command include:
                {
                  cmd: stats,
                  opt: \"\",
                  uid: uid,
                  gid: gid
                }

        Returns: string containing information about number of slots
        """
        return "OK max={0} running={1} pending={2}".format(
            self.config.MAX_TRANSFERS, len(self.procs), len(self.pending))
def main():
    """ Main function: load the configuration, prepare the local directory
    and IPC file layout, then run the dispatcher loop.
    """
    try:
        config = Configuration()
    except Exception as err:
        print("Configuration failed, error:{0}".format(err), file=sys.stderr)
        raise

    config.start_logging("dispatcher", config.LOG_FILE, True)
    logger = logging.getLogger("dispatcher")
    config.display()
    config.DIR = {}
    # Create the local directory structure in /var/eos/archive/
    # i.e /var/eos/archive/get/, /var/eos/archive/put/ etc.
    operations = (config.GET_OP, config.PUT_OP, config.PURGE_OP,
                  config.DELETE_OP, config.BACKUP_OP)

    for oper in operations:
        local_path = config.EOS_ARCHIVE_DIR + oper + '/'
        config.DIR[oper] = local_path

        try:
            os.mkdir(local_path)
        except OSError as __:
            pass  # directory exists

    # Prepare ZMQ IPC files
    os.umask(0o002)  # set files with 775 by default
    ipc_files = (config.FRONTEND_IPC, config.BACKEND_REQ_IPC,
                 config.BACKEND_PUB_IPC)

    for ipc_file in ipc_files:
        if os.path.exists(ipc_file):
            continue

        try:
            open(ipc_file, 'w').close()
            os.chmod(ipc_file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
        except OSError as err:
            logger.error(("Failed setting permissions on the IPC socket"
                          " file={0}").format(ipc_file))
            raise
        except IOError as err:
            logger.error(("Failed creating IPC socket file={0}").format(ipc_file))
            raise

    # Create dispatcher object
    dispatcher = Dispatcher(config)

    try:
        dispatcher.run()
    except Exception as err:
        logger.exception(err)


if __name__ == '__main__':
    try:
        main()
    except ValueError as __:
        # This is to deal with exceptions thrown when trying to close the log
        # file which is already deleted manually by an exterior process
        pass
================================================
FILE: archive/eosarchived.service
================================================
# ----------------------------------------------------------------------
# File: eosarchived.service
# Author: Elvin Sindrilaru - CERN
# ----------------------------------------------------------------------
#
# ************************************************************************
# * EOS - the CERN Disk Storage System *
# * Copyright (C) 2018 CERN/Switzerland *
# * *
# * This program is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation, either version 3 of the License, or *
# * (at your option) any later version. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program.  If not, see <http://www.gnu.org/licenses/>.*
# ************************************************************************
[Unit]
Description=EOS archiver daemon
After=network-online.target local-fs.target
Wants=network-online.target local-fs.target
[Service]
WorkingDirectory=/var/eos/
#LogsDirectory=eos/archive/
EnvironmentFile=/etc/sysconfig/eosarchived_env
ExecStart=/usr/bin/eosarchived.py
Type=simple
User=eosarchi
Group=daemon
Restart=on-abort
RestartSec=5
LimitNOFILE=65000
KillMode=mixed
SuccessExitStatus=KILL
================================================
FILE: archive/eosarchived_env.sysconfig
================================================
# Options for the eosarchived daemon
# EOSARCHIVED_OPTIONS=
# Enable core dumping
DAEMON_COREFILE_LIMIT="unlimited"
# Directory where log files are saved
LOG_DIR="/var/log/eos/archive/"
# This directory must match the one set in xrd.cf.mgm as it is used for the
# communication between the MGM and the eosarchived daemon
EOS_ARCHIVE_DIR=/var/eos/archive/
# Configuration file which can be modified while the daemon is running and
# whose changes are automatically picked up by new transfers
EOS_ARCHIVE_CONF=/etc/eosarchived.conf
# This is the location of the archive keytab file containing just **one** entry
# for the user account under which the eosarchived daemon is running. The same
# entry need to be present in the eos.keytab file so that the eosarchived can
# have full access to the EOS.
XrdSecSSSKT=/etc/archive.keytab
# Make eos-xrootd python bindings higher priority
PYTHONPATH=/opt/eos/xrootd/lib64/python3.6/site-packages/
================================================
FILE: archive/opt-eos-xrootd.pth
================================================
/opt/eos/xrootd/lib64/python3.6/site-packages/
================================================
FILE: auth_plugin/CMakeLists.txt
================================================
#-------------------------------------------------------------------------------
# File: CMakeLists.txt
# Author: Elvin-Alin Sindrilaru CERN
#-------------------------------------------------------------------------------
# ************************************************************************
# * EOS - the CERN Disk Storage System *
# * Copyright (C) 2013 CERN/Switzerland *
# * *
# * This program is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation, either version 3 of the License, or *
# * (at your option) any later version. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program.  If not, see <http://www.gnu.org/licenses/>.*
# ************************************************************************
#-------------------------------------------------------------------------------
# Generate all protocol buffer files
#-------------------------------------------------------------------------------
PROTOBUF_GENERATE_CPP(XSE_SRCS XSE_HDRS proto/XrdSecEntity.proto)
PROTOBUF_GENERATE_CPP(XOEI_SRCS XOEI_HDRS proto/XrdOucErrInfo.proto)
PROTOBUF_GENERATE_CPP(XSFS_SRCS XSFS_HDRS proto/XrdSfsFSctl.proto)
PROTOBUF_GENERATE_CPP(STAT_SRCS STAT_HDRS proto/Stat.proto)
PROTOBUF_GENERATE_CPP(FSCTL1_SRCS FSCTL1_HDRS proto/Fsctl.proto)
PROTOBUF_GENERATE_CPP(FSCTL2_SRCS FSCTL2_HDRS proto/FS_ctl.proto)
PROTOBUF_GENERATE_CPP(CHMOD_SRCS CHMOD_HDRS proto/Chmod.proto)
PROTOBUF_GENERATE_CPP(CHKSUM_SRCS CHKSUM_HDRS proto/Chksum.proto)
PROTOBUF_GENERATE_CPP(EXISTS_SRCS EXISTS_HDRS proto/Exists.proto)
PROTOBUF_GENERATE_CPP(MKDIR_SRCS MKDIR_HDRS proto/Mkdir.proto)
PROTOBUF_GENERATE_CPP(REMDIR_SRCS REMDIR_HDRS proto/Remdir.proto)
PROTOBUF_GENERATE_CPP(REM_SRCS REM_HDRS proto/Rem.proto)
PROTOBUF_GENERATE_CPP(RENAME_SRCS RENAME_HDRS proto/Rename.proto)
PROTOBUF_GENERATE_CPP(XSP_SRCS XSP_HDRS proto/XrdSfsPrep.proto)
PROTOBUF_GENERATE_CPP(PREPARE_SRCS PREPARE_HDRS proto/Prepare.proto)
PROTOBUF_GENERATE_CPP(TRUNCATE_SRCS TRUNCATE_HDRS proto/Truncate.proto)
PROTOBUF_GENERATE_CPP(DOPEN_SRCS DOPEN_HDRS proto/DirOpen.proto)
PROTOBUF_GENERATE_CPP(DREAD_SRCS DREAD_HDRS proto/DirRead.proto)
PROTOBUF_GENERATE_CPP(DFNAME_SRCS DFNAME_HDRS proto/DirFname.proto)
PROTOBUF_GENERATE_CPP(DCLOSE_SRCS DCLOSE_HDRS proto/DirClose.proto)
PROTOBUF_GENERATE_CPP(FOPEN_SRCS FOPEN_HDRS proto/FileOpen.proto)
PROTOBUF_GENERATE_CPP(FFNAME_SRCS FFNAME_HDRS proto/FileFname.proto)
PROTOBUF_GENERATE_CPP(FSTAT_SRCS FSTAT_HDRS proto/FileStat.proto)
PROTOBUF_GENERATE_CPP(FREAD_SRCS FREAD_HDRS proto/FileRead.proto)
PROTOBUF_GENERATE_CPP(FWRITE_SRCS FWRITE_HDRS proto/FileWrite.proto)
PROTOBUF_GENERATE_CPP(FCLOSE_SRCS FCLOSE_HDRS proto/FileClose.proto)
PROTOBUF_GENERATE_CPP(REQ_SRCS REQ_HDRS proto/Request.proto)
PROTOBUF_GENERATE_CPP(RESP_SRCS RESP_HDRS proto/Response.proto)
set(AUTH_PROTO_SRCS
${XSE_SRCS} ${XOEI_SRCS} ${XSFS_SRCS} ${STAT_SRCS}
${FSCTL1_SRCS} ${FSCTL2_SRCS} ${REQ_SRCS} ${RESP_SRCS}
${CHMOD_SRCS} ${CHKSUM_SRCS} ${EXISTS_SRCS} ${MKDIR_SRCS}
${REMDIR_SRCS} ${REM_SRCS} ${RENAME_SRCS} ${XSP_SRCS}
${PREPARE_SRCS} ${TRUNCATE_SRCS} ${DOPEN_SRCS} ${DREAD_SRCS}
${DFNAME_SRCS} ${DCLOSE_SRCS} ${FOPEN_SRCS} ${FCLOSE_SRCS}
${FFNAME_SRCS} ${FSTAT_SRCS} ${FREAD_SRCS} ${FWRITE_SRCS})
# Fix: ${EXITS_HDRS} was a typo for ${EXISTS_HDRS} (the variable produced by
# PROTOBUF_GENERATE_CPP for proto/Exists.proto); the misspelled variable is
# never defined, so the Exists headers were silently dropped from the list
set(AUTH_PROTO_HDRS
  ${XSE_HDRS} ${XOEI_HDRS} ${XSFS_HDRS} ${STAT_HDRS}
  ${FSCTL1_HDRS} ${FSCTL2_HDRS} ${REQ_HDRS} ${RESP_HDRS}
  ${CHMOD_HDRS} ${CHKSUM_HDRS} ${EXISTS_HDRS} ${MKDIR_HDRS}
  ${REMDIR_HDRS} ${REM_HDRS} ${RENAME_HDRS} ${XSP_HDRS}
  ${PREPARE_HDRS} ${TRUNCATE_HDRS} ${DOPEN_HDRS} ${DREAD_HDRS}
  ${DFNAME_HDRS} ${DCLOSE_HDRS} ${FOPEN_HDRS} ${FCLOSE_HDRS}
  ${FFNAME_HDRS} ${FSTAT_HDRS} ${FREAD_HDRS} ${FWRITE_HDRS})
set_source_files_properties(
${AUTH_PROTO_SRCS}
${AUTH_PROTO_HDRS}
PROPERTIES GENERATED 1)
#-------------------------------------------------------------------------------
# EosAuthProto-Objects
#-------------------------------------------------------------------------------
add_library(EosAuthProto-Objects OBJECT
ProtoUtils.cc ProtoUtils.hh
${AUTH_PROTO_SRCS} ${AUTH_PROTO_HDRS})
target_link_libraries(EosAuthProto-Objects PUBLIC
PROTOBUF::PROTOBUF
XROOTD::UTILS
XROOTD::PRIVATE)
target_include_directories(EosAuthProto-Objects PUBLIC
$
$)
set_target_properties(EosAuthProto-Objects
PROPERTIES POSITION_INDEPENDENT_CODE TRUE)
#-------------------------------------------------------------------------------
# EosAuthOfs library
#-------------------------------------------------------------------------------
add_library(EosAuthOfs-${XRDPLUGIN_SOVERSION} MODULE
EosAuthOfs.cc EosAuthOfs.hh
EosAuthOfsFile.cc EosAuthOfsFile.hh
EosAuthOfsDirectory.cc EosAuthOfsDirectory.hh)
target_link_libraries(
EosAuthOfs-${XRDPLUGIN_SOVERSION} PRIVATE
EosAuthProto-Objects
EosCommon
ZMQ::ZMQ
XROOTD::PRIVATE)
install(TARGETS EosAuthOfs-${XRDPLUGIN_SOVERSION}
LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR})
================================================
FILE: auth_plugin/EosAuthOfs.cc
================================================
//------------------------------------------------------------------------------
// File: EosAuthOfs.cc
// Author: Elvin-Alin Sindrilaru CERN
//------------------------------------------------------------------------------
/************************************************************************
* EOS - the CERN Disk Storage System *
* Copyright (C) 2013 CERN/Switzerland *
* *
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.*
************************************************************************/
#include
#include
#include
#include
#include
#include
#include "EosAuthOfs.hh"
#include "ProtoUtils.hh"
#include "EosAuthOfsDirectory.hh"
#include "EosAuthOfsFile.hh"
#include "common/SymKeys.hh"
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
// The global OFS handle
eos::auth::EosAuthOfs* eos::auth::gOFS = nullptr;
extern XrdSysError OfsEroute;
extern XrdOfs* XrdOfsFS;
XrdVERSIONINFO(XrdSfsGetFileSystem, AuthOfs);
XrdVERSIONINFO(XrdSfsGetFileSystem2, AuthOfs);
//------------------------------------------------------------------------------
// Filesystem Plugin factory function
//------------------------------------------------------------------------------
extern "C"
{
//------------------------------------------------------------------------------
//! Filesystem Plugin factory function
//!
//! @description FileSystem2 version, to allow passing configuration info back
//!              to XRootD. Configure with: xrootd.fslib -2 libXrdEosMgm.so
//!
//! @param native_fs (not used)
//! @param lp the logger object
//! @param configfn the configuration file name
//! @param envP pass configuration information back to XrdXrootd
//!
//! @returns configured EosAuthOfs object or nullptr if configuration fails
//------------------------------------------------------------------------------
XrdSfsFileSystem*
XrdSfsGetFileSystem2(XrdSfsFileSystem* native_fs,
                     XrdSysLogger* lp,
                     const char* configfn,
                     XrdOucEnv* envP)
{
  if (eos::auth::gOFS) {
    // File system object already initialized
    return eos::auth::gOFS;
  }

  // Do the herald thing
  OfsEroute.SetPrefix("AuthOfs_");
  OfsEroute.logger(lp);
  XrdOucString version = "AuthOfs (Object Storage File System) ";
  version += VERSION;
  OfsEroute.Say("++++++ (c) 2013 CERN/IT-DSS ", version.c_str());
  // Initialize the subsystems
  eos::auth::gOFS = new eos::auth::EosAuthOfs();
  // Duplicate the config file name; ownership stays with gOFS (freed in dtor)
  eos::auth::gOFS->ConfigFN = (configfn && *configfn ? strdup(configfn) : nullptr);

  if (eos::auth::gOFS->Configure(OfsEroute, envP)) {
    // Use nullptr (not 0) for the pointer return on configuration failure
    return nullptr;
  }

  XrdOfsFS = eos::auth::gOFS;
  return eos::auth::gOFS;
}
//------------------------------------------------------------------------------
//! Filesystem Plugin factory function (legacy single-argument entry point)
//!
//! @param native_fs (not used)
//! @param lp the logger object
//! @param configfn the configuration file name
//!
//! @returns configures and returns our MgmOfs object
//------------------------------------------------------------------------------
XrdSfsFileSystem*
XrdSfsGetFileSystem(XrdSfsFileSystem* native_fs,
                    XrdSysLogger* lp,
                    const char* configfn)
{
  // Hand back the already-configured filesystem object if one exists
  if (eos::auth::gOFS != nullptr) {
    OfsEroute.SetPrefix("AuthOfs_");
    OfsEroute.logger(lp);
    OfsEroute.Say("info=\"return already loaded AUTH OFS pointer\"");
    return eos::auth::gOFS;
  }

  // Otherwise delegate to the version-2 factory with no extra env info
  return XrdSfsGetFileSystem2(native_fs, lp, configfn, nullptr);
}
} // extern "C"
EOSAUTHNAMESPACE_BEGIN
//------------------------------------------------------------------------------
// Constructor - sets up the ZMQ context and the logging subsystem; the
// backend MGM endpoint is filled in later by Configure()
//------------------------------------------------------------------------------
EosAuthOfs::EosAuthOfs():
  XrdOfs(), eos::common::LogId(), proxy_tid(0), mFrontend(0),
  mSizePoolSocket(5), mPort(0), mCollapsePort(0), mLogLevel(LOG_INFO)
{
  // Create the ZMQ context shared by all sockets of this plugin
  mZmqContext = new zmq::context_t(1);
  // Backend endpoint/socket not known yet - populated during Configure()
  mBackend = std::make_pair(std::string(""), (zmq::socket_t*)0);
  // Configure the circular in-memory log buffer for this unit
  eos::common::Logging& logging = eos::common::Logging::GetInstance();
  logging.SetLogPriority(mLogLevel);
  XrdOucString log_unit = "auth@localhost";
  logging.SetUnit(log_unit.c_str());
  eos_info("info=\"logging configured\"");
}
//------------------------------------------------------------------------------
// Destructor - stops the proxy thread first, then releases all ZMQ sockets
// before tearing down the shared ZMQ context they were created from
//------------------------------------------------------------------------------
EosAuthOfs::~EosAuthOfs()
{
  zmq::socket_t* socket;

  // Kill the auth proxy thread so no socket is in use while we delete them
  if (proxy_tid) {
    XrdSysThread::Cancel(proxy_tid);
    XrdSysThread::Join(proxy_tid, 0);
  }

  // Release memory: drain the pool of client-facing REQ sockets
  while (mPoolSocket.try_pop(socket)) {
    delete socket;
  }

  // Sockets must be destroyed before the context they belong to
  delete mFrontend;
  delete mBackend.second;
  delete mZmqContext;

  // Free configuration file name allocated via strdup during initialization
  if (ConfigFN) {
    free(ConfigFN);
    ConfigFN = nullptr;
  }
}
//------------------------------------------------------------------------------
// Configure routine
//
// Parses the eosauth.* directives from the xrd.cf.auth configuration file
// (mgm endpoint, collapse port, socket pool size, log level), resolves the
// local host address, starts the proxy thread, builds the socket pool and
// computes the adler/sha1 checksum of the default keytab file.
//
// @param error error/logging object
// @param envP XRootD environment (forwarded to XrdOfs::Configure)
//
// @return 0 on success, non-zero on any configuration failure
//------------------------------------------------------------------------------
int
EosAuthOfs::Configure(XrdSysError& error, XrdOucEnv* envP)
{
  int NoGo = 0;
  int cfgFD;
  char* var;
  const char* val;
  std::string space_tkn;
  // Configure the basic XrdOfs and exit if not successful
  NoGo = XrdOfs::Configure(error, envP);
  eos::common::Logging& g_logging = eos::common::Logging::GetInstance();

  if (NoGo) {
    return NoGo;
  }

  mPort = myPort;
  // By default we collapse on the same AUTH service port on a remote machine
  mCollapsePort = mPort;
  // Get the hostname
  const char* errtext = 0;
  const char* host_name = XrdNetUtils::MyHostName(0, &errtext);

  if (!host_name) {
    // Report the error text - host_name is null at this point
    error.Emsg("Config", "hostname is invalid : %s", errtext);
    return 1;
  }

  XrdNetAddr* addrs = 0;
  int nAddrs = 0;
  const char* err = XrdNetUtils::GetAddrs(host_name, &addrs, nAddrs,
                                          XrdNetUtils::allIPv64,
                                          XrdNetUtils::NoPortRaw);
  // MyHostName returns a malloc-ed buffer which we own
  free(const_cast<char*>(host_name));

  if (err) {
    error.Emsg("Config", "hostname is invalid : %s", err);
    return 1;
  }

  if (nAddrs == 0) {
    error.Emsg("Config", "hostname is invalid");
    return 1;
  }

  char addr_buffer[64];
  int length = addrs[0].Format(addr_buffer, sizeof(addr_buffer),
                               XrdNetAddrInfo::fmtAddr,
                               XrdNetAddrInfo::noPortRaw);
  delete [] addrs;

  if (length == 0) {
    error.Emsg("Config", "hostname is invalid");
    return 1;
  }

  mManagerIp.assign(addr_buffer, length);
  // Extract the manager from the config file
  XrdOucStream Config(&error, getenv("XRDINSTANCE"));

  // Read in the auth configuration from the xrd.cf.auth file
  if (!ConfigFN || !*ConfigFN) {
    NoGo = 1;
    error.Emsg("Configure", "no configure file");
  } else {
    // Try to open the configuration file.
    if ((cfgFD = open(ConfigFN, O_RDONLY, 0)) < 0) {
      return error.Emsg("Configure", errno, "open config file fn=", ConfigFN);
    }

    Config.Attach(cfgFD);
    std::string auth_tag = "eosauth.";

    while ((var = Config.GetMyFirstWord())) {
      if (!strncmp(var, auth_tag.c_str(), auth_tag.length())) {
        var += auth_tag.length();
        // Get EOS instance to which we dispatch requests. Note that the port is the one
        // waiting for authentication requests and not the usual one i.e 1094. The presence
        // of the mgm parameter is mandatory.
        std::string mgm_instance;
        std::string option_tag = "mgm";

        if (!strncmp(var, option_tag.c_str(), option_tag.length())) {
          if ((val = Config.GetWord())) {
            mgm_instance = val;

            // Only accept a host:port endpoint
            if (mgm_instance.find(":") != std::string::npos) {
              mBackend = std::make_pair(mgm_instance, (zmq::socket_t*)0);
            }
          } else {
            // This parameter is critical
            error.Emsg("Configure ", "No EOS mgm instance provided");
            NoGo = 1;
          }
        }

        // Get the port used for collapse redirection, default: same as mPort
        option_tag = "collapseport";

        if (!strncmp(var, option_tag.c_str(), option_tag.length())) {
          if (!(val = Config.GetWord())) {
            error.Emsg("Configure ", "No collapseport specified");
          } else {
            mCollapsePort = atoi(val);
          }
        }

        // Get number of sockets in the pool by default 10
        option_tag = "numsockets";

        if (!strncmp(var, option_tag.c_str(), option_tag.length())) {
          if (!(val = Config.GetWord())) {
            error.Emsg("Configure ", "No number of sockets specified");
          } else {
            mSizePoolSocket = atoi(val);
          }
        }

        // Get log level by default LOG_INFO
        option_tag = "loglevel";

        if (!strncmp(var, option_tag.c_str(), option_tag.length())) {
          if (!(val = Config.GetWord())) {
            error.Emsg("Config", "argument for debug level invalid set to ERR.");
            mLogLevel = LOG_INFO;
          } else {
            std::string str_val(val);

            if (isdigit(str_val[0])) {
              // The level is given as a number
              mLogLevel = atoi(val);
            } else {
              // The level is given as a string
              mLogLevel = g_logging.GetPriorityByString(val);
            }

            error.Say("=====> eosauth.loglevel: ",
                      g_logging.GetPriorityString(mLogLevel), "");
          }

          // Set the new log level
          g_logging.SetLogPriority(mLogLevel);
        }
      }
    }

    // Check and connect at least to an MGM master
    if (!mBackend.first.empty()) {
      if ((XrdSysThread::Run(&proxy_tid, EosAuthOfs::StartAuthProxyThread,
                             static_cast<void*>(this), 0,
                             "Auth Proxy Thread"))) {
        eos_err("cannot start the authentication proxy thread");
        NoGo = 1;
      }

      // Create a pool of sockets connected to the master proxy service
      for (int i = 0; i < mSizePoolSocket; i++) {
        // Set socket receive timeout to 5 seconds
        zmq::socket_t* socket = new zmq::socket_t(*mZmqContext, ZMQ_REQ);
        int timeout_mili = 5000;
        socket->set(zmq::sockopt::rcvtimeo, timeout_mili);
        int socket_linger = 0;
        socket->set(zmq::sockopt::linger, socket_linger);
        std::string endpoint = "inproc://proxyfrontend";

        // Try in a loop to connect to the proxyfrontend as it can take a while for
        // the proxy thread to do the binding, therefore connect can fail
        while (1) {
          try {
            socket->connect(endpoint.c_str());
          } catch (zmq::error_t& err) {
            eos_warning("dealing with connect exception, retrying ...");
            continue;
          }

          break;
        }

        mPoolSocket.push(socket);
      }
    } else {
      eos_err("No master MGM specified e.g. eos.master.cern.ch:15555");
      NoGo = 1;
    }

    close(cfgFD);
  }

  //----------------------------------------------------------------------------
  // Build the adler & sha1 checksum of the default keytab file
  //----------------------------------------------------------------------------
  XrdOucString keytabcks = "unaccessible";
  std::string keytab_path = "/etc/eos.keytab";
  int fd = ::open(keytab_path.c_str(), O_RDONLY);
  XrdOucString symkey = "";

  if (fd >= 0) {
    char kt_buffer[65535];
    char keydigest[SHA_DIGEST_LENGTH + 1];
    SHA_CTX sha1;
    SHA1_Init(&sha1);
    // read() returns ssize_t: keep the sign so the -1 error case is not
    // converted into a huge positive size_t
    ssize_t nread = ::read(fd, kt_buffer, sizeof(kt_buffer));

    if (nread > 0) {
      unsigned int adler;
      SHA1_Update(&sha1, (const char*) kt_buffer, nread);
      adler = adler32(0L, Z_NULL, 0);
      adler = adler32(adler, (const Bytef*) kt_buffer, nread);
      char sadler[1024];
      snprintf(sadler, sizeof(sadler) - 1, "%08x", adler);
      keytabcks = sadler;
    } else {
      eos_err("Failed while reading, error: %s", strerror(errno));
      close(fd);
      return 1;
    }

    SHA1_Final((unsigned char*) keydigest, &sha1);
    eos::common::SymKey::Base64Encode(keydigest, SHA_DIGEST_LENGTH, symkey);
    close(fd);
  } else {
    eos_err("Failed to open keytab file: %s", keytab_path.c_str());
    return 1;
  }

  eos_notice("AUTH_HOST=%s AUTH_PORT=%ld VERSION=%s RELEASE=%s KEYTABADLER=%s",
             mManagerIp.c_str(), myPort, VERSION, RELEASE, keytabcks.c_str());

  // Store the symmetric key derived from the keytab for HMAC computation
  if (!eos::common::gSymKeyStore.SetKey64(symkey.c_str(), 0)) {
    eos_crit("unable to store the created symmetric key %s", symkey.c_str());
    NoGo = 1;
  }

  return NoGo;
}
//------------------------------------------------------------------------------
// Authentication proxy thread startup function
//------------------------------------------------------------------------------
void*
EosAuthOfs::StartAuthProxyThread(void* pp)
{
EosAuthOfs* ofs = static_cast(pp);
ofs->AuthProxyThread();
return 0;
}
//------------------------------------------------------------------------------
// Authentication proxy thread which forwards requests from the clients
// to the proper MGM instance.
//------------------------------------------------------------------------------
void
EosAuthOfs::AuthProxyThread()
{
  // Bind the client facing socket (ROUTER side of the in-process proxy)
  mFrontend = new zmq::socket_t(*mZmqContext, ZMQ_ROUTER);
  mFrontend->bind("inproc://proxyfrontend");
  // Connect sockets facing the MGM nodes - master and slave
  std::ostringstream sstr;
  mBackend = std::make_pair(mBackend.first,
                            new zmq::socket_t(*mZmqContext, ZMQ_DEALER));
  sstr << "tcp://" << mBackend.first;
  mBackend.second->connect(sstr.str().c_str());
  OfsEroute.Say("=====> connected to MGM: ", mBackend.first.c_str());
  // Set the master to point to the master MGM - no need for lock
  auto master = mBackend.second;
  int rc = -1;
  zmq::message_t msg;
  // Start the proxy using the first entry
  int more;
  int poll_size = 2;
  // NOTE(review): array is sized 3 but only 2 entries are polled - the third
  // slot appears unused
  zmq::pollitem_t items[3] = {
    { (void*)* mFrontend, 0, ZMQ_POLLIN, 0},
    { (void*)* mBackend.second, 0, ZMQ_POLLIN, 0}
  };

  // Main loop in which the proxy thread accepts request from the clients and
  // then he forwards them to the current master MGM. The master MGM can change
  // at any point.
  while (true) {
    // Wait while there are either requests or replies to process
    try {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
      rc = zmq::poll(&items[0], poll_size, -1);
#pragma GCC diagnostic pop
    } catch (zmq::error_t& e) {
      eos_err("Exception thrown: %s", e.what());
    }

    if (rc < 0) {
      eos_err("error in poll");
      return;
    }

    // Process a request: forward all message parts from a client to the MGM
    if (items[0].revents & ZMQ_POLLIN) {
      eos_debug("got frontend event");
      zmq::recv_flags rf = zmq::recv_flags::none;

      while (true) {
        if (!mFrontend->recv(msg, rf).has_value()) {
          eos_err("error while recv on frontend");
          return;
        }

        // Check whether more parts of a multipart message follow
        try {
          more = mFrontend->get(zmq::sockopt::rcvmore);
        } catch (zmq::error_t& err) {
          eos_err("exception in getsockopt");
          return;
        }

        // Send request to the MGM - serialized under the master mutex since
        // the master endpoint may be swapped concurrently
        {
          XrdSysMutexHelper scop_lock(mMutexMaster);
          zmq::send_flags sf = zmq::send_flags::none;

          if (more) {
            sf = zmq::send_flags::sndmore;
          }

          if (!master->send(msg, sf)) {
            eos_err("error while sending to master");
            return;
          }
        }

        if (more == 0) {
          break;
        }
      }
    }

    // Process a reply from the first MGM: relay all parts back to the client
    if (items[1].revents & ZMQ_POLLIN) {
      eos_debug("got mBackend event");
      zmq::recv_flags rf = zmq::recv_flags::none;

      while (true) {
        if (!mBackend.second->recv(msg, rf).has_value()) {
          eos_err("error while recv on mBackend");
          return;
        }

        try {
          more = mBackend.second->get(zmq::sockopt::rcvmore);
        } catch (zmq::error_t& err) {
          eos_err("exception in getsockopt");
          return;
        }

        zmq::send_flags sf = zmq::send_flags::none;

        if (more) {
          sf = zmq::send_flags::sndmore;
        }

        if (!mFrontend->send(msg, sf)) {
          eos_err("error while send to frontend(1)");
          return;
        }

        if (more == 0) {
          break;
        }
      }
    }
  }
}
//------------------------------------------------------------------------------
// Get directory object
//
// @param user client user name
// @param MonID monitoring identifier
//
// @return new auth-plugin directory object upcast to the SFS interface
//------------------------------------------------------------------------------
XrdSfsDirectory*
EosAuthOfs::newDir(char* user, int MonID)
{
  return static_cast<XrdSfsDirectory*>(new EosAuthOfsDirectory(user, MonID));
}
//------------------------------------------------------------------------------
// Get file object
//
// @param user client user name
// @param MonID monitoring identifier
//
// @return new auth-plugin file object upcast to the SFS interface
//------------------------------------------------------------------------------
XrdSfsFile*
EosAuthOfs::newFile(char* user, int MonID)
{
  return static_cast<XrdSfsFile*>(new EosAuthOfsFile(user, MonID));
}
void
EosAuthOfs::ProcessError(eos::auth::ResponseProto* resp_func, XrdOucErrInfo& error, const char* path)
{
if (resp_func->has_error()) {
if (resp_func->collapse() && path && strlen(path)) {
// collpase redirection to remote AUTH MGM
std::string url = "root://";
url += resp_func->error().message();;
url += ":";
url += std::to_string(mCollapsePort);
url += "/";
url += path;
error.setErrInfo(~(~(-1) | kXR_collapseRedir), url.c_str());
} else {
// one-shot redirection to remote MGM
error.setErrInfo(resp_func->error().code(),
resp_func->error().message().c_str());
}
}
}
//------------------------------------------------------------------------------
//! Stat method
//------------------------------------------------------------------------------
int
EosAuthOfs::stat(const char* path,
struct stat* buf,
XrdOucErrInfo& error,
const XrdSecEntity* client,
const char* opaque)
{
int retc = SFS_ERROR;
eos_debug("stat path=%s", path);
// Create request object
RequestProto* req_proto = utils::GetStatRequest(RequestProto_OperationType_STAT,
path, error, client, opaque);
// Compute HMAC for request object
if (!utils::ComputeHMAC(req_proto)) {
eos_err("error HMAC FS stat");
delete req_proto;
return retc;
}
// Get a socket object from the pool
zmq::socket_t* socket;
mPoolSocket.wait_pop(socket);
if (SendProtoBufRequest(socket, req_proto)) {
ResponseProto* resp_stat = static_cast(GetResponse(socket));
if (resp_stat) {
retc = resp_stat->response();
ProcessError(resp_stat, error, path);
// We retrieve the struct stat if response is ok
if ((retc == SFS_OK) && resp_stat->has_message()) {
buf = static_cast(memcpy((void*)buf,
resp_stat->message().c_str(),
sizeof(struct stat)));
}
delete resp_stat;
}
}
// Release socket and free memory
gOFS->mPoolSocket.push(socket);
delete req_proto;
return retc;
}
//--------------------------------------------------------------------------
// Stat function to retrieve mode
//--------------------------------------------------------------------------
int
EosAuthOfs::stat(const char* path,
mode_t& mode,
XrdOucErrInfo& error,
const XrdSecEntity* client,
const char* opaque)
{
int retc = SFS_ERROR;
eos_debug("statm path=%s", path);
RequestProto* req_proto = utils::GetStatRequest(
RequestProto_OperationType_STATM,
path, error, client, opaque);
// Compute HMAC for request object
if (!utils::ComputeHMAC(req_proto)) {
eos_err("error HMAC FS statm");
delete req_proto;
return retc;
}
// Get a socket object from the pool
zmq::socket_t* socket;
mPoolSocket.wait_pop(socket);
if (SendProtoBufRequest(socket, req_proto)) {
ResponseProto* resp_stat = static_cast(GetResponse(socket));
if (resp_stat) {
retc = resp_stat->response();
ProcessError(resp_stat, error, path);
// We retrieve the open mode if response if ok
if ((retc == SFS_OK) && resp_stat->has_message()) {
memcpy((void*)&mode, resp_stat->message().c_str(), sizeof(mode_t));
}
delete resp_stat;
}
}
// Release socket and free memory
gOFS->mPoolSocket.push(socket);
delete req_proto;
return retc;
}
//------------------------------------------------------------------------------
// Execute file system command
//------------------------------------------------------------------------------
int
EosAuthOfs::fsctl(const int cmd,
const char* args,
XrdOucErrInfo& error,
const XrdSecEntity* client)
{
int retc = SFS_ERROR;
eos_debug("fsctl with cmd=%i, args=%s", cmd, args);
int opcode = cmd & SFS_FSCTL_CMD;
// For the server configuration query we asnwer with the information of the
// authentication XRootD server i.e. don't frw it to the real MGM.
if (opcode == SFS_FSCTL_LOCATE) {
char locResp[4096];
char rType[3], *Resp[] = {rType, locResp};
rType[0] = 'S';
// don't manage writes via global redirection - therefore we mark the files as 'r'
rType[1] = 'r';
rType[2] = '\0';
sprintf(locResp, "[::%s]:%d ", (char*) gOFS->mManagerIp.c_str(),
gOFS->mPort);
error.setErrInfo(strlen(locResp) + 3, (const char**) Resp, 2);
return SFS_DATA;
}
RequestProto* req_proto = utils::GetFsctlRequest(cmd, args, error, client);
// Compute HMAC for request object
if (!utils::ComputeHMAC(req_proto)) {
eos_err("error HMAC FS fsctl");
delete req_proto;
return retc;
}
// Get a socket object from the pool
zmq::socket_t* socket;
mPoolSocket.wait_pop(socket);
if (SendProtoBufRequest(socket, req_proto)) {
ResponseProto* resp_fsctl1 = static_cast(GetResponse(socket));
if (resp_fsctl1) {
retc = resp_fsctl1->response();
// TODO: we can't collapse without a path ...
ProcessError(resp_fsctl1, error, 0);
delete resp_fsctl1;
}
}
// Release socket and free memory
gOFS->mPoolSocket.push(socket);
delete req_proto;
return retc;
}
//------------------------------------------------------------------------------
// Execute file system command !!! FSctl !!!
//------------------------------------------------------------------------------
int
EosAuthOfs::FSctl(const int cmd,
XrdSfsFSctl& args,
XrdOucErrInfo& error,
const XrdSecEntity* client)
{
int retc = SFS_ERROR;
eos_debug("FSctl with cmd=%i", cmd);
RequestProto* req_proto = utils::GetFSctlRequest(cmd, args, error, client);
// Compute HMAC for request object
if (!utils::ComputeHMAC(req_proto)) {
eos_err("error HMAC FS FSctl");
delete req_proto;
return retc;
}
// Get a socket object from the pool
zmq::socket_t* socket;
mPoolSocket.wait_pop(socket);
if (SendProtoBufRequest(socket, req_proto)) {
ResponseProto* resp_fsctl2 = static_cast(GetResponse(socket));
if (resp_fsctl2) {
retc = resp_fsctl2->response();
// TODO: we can't collapse without a path ...
ProcessError(resp_fsctl2, error, 0);
delete resp_fsctl2;
}
}
// Release socket and free memory
gOFS->mPoolSocket.push(socket);
delete req_proto;
return retc;
}
//------------------------------------------------------------------------------
// Chmod by client
//------------------------------------------------------------------------------
int
EosAuthOfs::chmod(const char* path,
XrdSfsMode mode,
XrdOucErrInfo& error,
const XrdSecEntity* client,
const char* opaque)
{
int retc = SFS_ERROR;
eos_debug("chmod path=%s mode=%o", path, mode);
RequestProto* req_proto = utils::GetChmodRequest(path, mode, error, client,
opaque);
// Compute HMAC for request object
if (!utils::ComputeHMAC(req_proto)) {
eos_err("error HMAC FS chmod");
delete req_proto;
return retc;
}
// Get a socket object from the pool
zmq::socket_t* socket;
mPoolSocket.wait_pop(socket);
if (SendProtoBufRequest(socket, req_proto)) {
ResponseProto* resp_chmod = static_cast(GetResponse(socket));
if (resp_chmod) {
retc = resp_chmod->response();
ProcessError(resp_chmod, error, path);
delete resp_chmod;
}
}
// Release socket and free memory
gOFS->mPoolSocket.push(socket);
delete req_proto;
return retc;
}
//------------------------------------------------------------------------------
// Chksum by client - forwarded to the remote MGM over ZMQ
//
// @param func checksum sub-operation
// @param csName checksum name
// @param path file path
// @param error error object set on failure/redirect
// @param client client identity
// @param opaque opaque request info
//
// @return SFS_* return code from the remote MGM, SFS_ERROR on local failure
//------------------------------------------------------------------------------
int
EosAuthOfs::chksum(csFunc func,
                   const char* csName,
                   const char* path,
                   XrdOucErrInfo& error,
                   const XrdSecEntity* client,
                   const char* opaque)
{
  int retc = SFS_ERROR;
  eos_debug("chksum path=%s csName=%s", path, csName);
  RequestProto* req_proto = utils::GetChksumRequest(func, csName, path, error,
                            client, opaque);

  // Compute HMAC for request object
  if (!utils::ComputeHMAC(req_proto)) {
    eos_err("error HMAC FS chksum");
    delete req_proto;
    return retc;
  }

  // Get a socket object from the pool
  zmq::socket_t* socket;
  mPoolSocket.wait_pop(socket);

  if (SendProtoBufRequest(socket, req_proto)) {
    ResponseProto* resp_chksum =
      static_cast<ResponseProto*>(GetResponse(socket));

    if (resp_chksum) {
      retc = resp_chksum->response();
      eos_debug("chksum retc=%i", retc);
      ProcessError(resp_chksum, error, path);
      delete resp_chksum;
    }
  }

  // Release socket and free memory
  gOFS->mPoolSocket.push(socket);
  delete req_proto;
  return retc;
}
//------------------------------------------------------------------------------
// Exists function
//------------------------------------------------------------------------------
int
EosAuthOfs::exists(const char* path,
XrdSfsFileExistence& exists_flag,
XrdOucErrInfo& error,
const XrdSecEntity* client,
const char* opaque)
{
int retc = SFS_ERROR;
eos_debug("exists path=%s", path);
RequestProto* req_proto = utils::GetExistsRequest(path, error, client, opaque);
// Compute HMAC for request object
if (!utils::ComputeHMAC(req_proto)) {
eos_err("error HMAC FS exists");
delete req_proto;
return retc;
}
// Get a socket object from the pool
zmq::socket_t* socket;
mPoolSocket.wait_pop(socket);
if (SendProtoBufRequest(socket, req_proto)) {
ResponseProto* resp_exists = static_cast(GetResponse(socket));
if (resp_exists) {
retc = resp_exists->response();
eos_debug("exists retc=%i", retc);
ProcessError(resp_exists, error, path);
if (resp_exists->has_message()) {
exists_flag = (XrdSfsFileExistence)atoi(resp_exists->message().c_str());
}
delete resp_exists;
}
}
// Release socket and free memory
gOFS->mPoolSocket.push(socket);
delete req_proto;
return retc;
}
//------------------------------------------------------------------------------
// Create directory
// Note: the mode set here is actually ignored if the directoy is not the top
// one. The new directory inherits the mode bits from its parent directory.
// This is typical only for EOS since in a normal XRootD server the access bits
// specified in the mkdir command are actually applied as expected.
//------------------------------------------------------------------------------
int
EosAuthOfs::mkdir(const char* path,
XrdSfsMode mode, // Ignored in EOS if it has a parent dir
XrdOucErrInfo& error,
const XrdSecEntity* client,
const char* opaque)
{
int retc = SFS_ERROR;
eos_debug("mkdir path=%s mode=%o", path, mode);
RequestProto* req_proto = utils::GetMkdirRequest(path, mode, error, client,
opaque);
// Compute HMAC for request object
if (!utils::ComputeHMAC(req_proto)) {
eos_err("error HMAC FS mkdir");
delete req_proto;
return retc;
}
// Get a socket object from the pool
zmq::socket_t* socket;
mPoolSocket.wait_pop(socket);
if (SendProtoBufRequest(socket, req_proto)) {
ResponseProto* resp_mkdir = static_cast(GetResponse(socket));
if (resp_mkdir) {
retc = resp_mkdir->response();
eos_debug("mkdir retc=%i", retc);
ProcessError(resp_mkdir, error, path);
delete resp_mkdir;
}
}
// Release socket and free memory
gOFS->mPoolSocket.push(socket);
delete req_proto;
return retc;
}
//------------------------------------------------------------------------------
// Remove directory
//------------------------------------------------------------------------------
int
EosAuthOfs::remdir(const char* path,
XrdOucErrInfo& error,
const XrdSecEntity* client,
const char* opaque)
{
int retc = SFS_ERROR;
eos_debug("remdir path=%s", path);
RequestProto* req_proto = utils::GetRemdirRequest(path, error, client, opaque);
// Compute HMAC for request object
if (!utils::ComputeHMAC(req_proto)) {
eos_err("error HMAC FS remdir");
delete req_proto;
return retc;
}
// Get a socket object from the pool
zmq::socket_t* socket;
mPoolSocket.wait_pop(socket);
if (SendProtoBufRequest(socket, req_proto)) {
ResponseProto* resp_remdir = static_cast(GetResponse(socket));
if (resp_remdir) {
retc = resp_remdir->response();
eos_debug("remdir retc=%i", retc);
ProcessError(resp_remdir, error, path);
delete resp_remdir;
}
}
// Release socket and free memory
gOFS->mPoolSocket.push(socket);
delete req_proto;
return retc;
}
//------------------------------------------------------------------------------
// Remove file
//------------------------------------------------------------------------------
int
EosAuthOfs::rem(const char* path,
XrdOucErrInfo& error,
const XrdSecEntity* client,
const char* opaque)
{
int retc = SFS_ERROR;
eos_debug("rem path=%s", path);
RequestProto* req_proto = utils::GetRemRequest(path, error, client, opaque);
// Compute HMAC for request object
if (!utils::ComputeHMAC(req_proto)) {
eos_err("error HMAC FS rem");
delete req_proto;
return retc;
}
// Get a socket object from the pool
zmq::socket_t* socket;
mPoolSocket.wait_pop(socket);
if (SendProtoBufRequest(socket, req_proto)) {
ResponseProto* resp_rem = static_cast(GetResponse(socket));
if (resp_rem) {
retc = resp_rem->response();
eos_debug("rem retc=%i", retc);
ProcessError(resp_rem, error, path);
delete resp_rem;
}
}
// Release socket and free memory
gOFS->mPoolSocket.push(socket);
delete req_proto;
return retc;
}
//------------------------------------------------------------------------------
// Rename file
//------------------------------------------------------------------------------
int
EosAuthOfs::rename(const char* oldName,
const char* newName,
XrdOucErrInfo& error,
const XrdSecEntity* client,
const char* opaqueO,
const char* opaqueN)
{
int retc = SFS_ERROR;
eos_debug("rename oldname=%s newname=%s", oldName, newName);
RequestProto* req_proto = utils::GetRenameRequest(oldName, newName, error,
client, opaqueO, opaqueN);
// Compute HMAC for request object
if (!utils::ComputeHMAC(req_proto)) {
eos_err("error HMAC FS rename");
delete req_proto;
return retc;
}
// Get a socket object from the pool
zmq::socket_t* socket;
mPoolSocket.wait_pop(socket);
if (SendProtoBufRequest(socket, req_proto)) {
ResponseProto* resp_rename = static_cast(GetResponse(socket));
if (resp_rename) {
retc = resp_rename->response();
eos_debug("rename retc=%i", retc);
ProcessError(resp_rename, error, 0);
delete resp_rename;
}
}
// Release socket and free memory
gOFS->mPoolSocket.push(socket);
delete req_proto;
return retc;
}
//------------------------------------------------------------------------------
// Prepare request - forwarded to the remote MGM over ZMQ
//
// @param pargs prepare arguments
// @param error error object set on failure/redirect
// @param client client identity
//
// @return SFS_* return code from the remote MGM, SFS_ERROR on local failure
//------------------------------------------------------------------------------
int
EosAuthOfs::prepare(XrdSfsPrep& pargs,
                    XrdOucErrInfo& error,
                    const XrdSecEntity* client)
{
  int retc = SFS_ERROR;
  eos_debug("prepare");
  RequestProto* req_proto = utils::GetPrepareRequest(pargs, error, client);

  // Compute HMAC for request object
  if (!utils::ComputeHMAC(req_proto)) {
    eos_err("error HMAC FS prepare");
    delete req_proto;
    return retc;
  }

  // Get a socket object from the pool
  zmq::socket_t* socket;
  mPoolSocket.wait_pop(socket);

  if (SendProtoBufRequest(socket, req_proto)) {
    ResponseProto* resp_prepare =
      static_cast<ResponseProto*>(GetResponse(socket));

    if (resp_prepare) {
      retc = resp_prepare->response();
      eos_debug("prepare retc=%i", retc);
      ProcessError(resp_prepare, error, 0);
      delete resp_prepare;
    }
  }

  // Release socket and free memory
  gOFS->mPoolSocket.push(socket);
  delete req_proto;
  return retc;
}
//------------------------------------------------------------------------------
// Truncate file
//------------------------------------------------------------------------------
int
EosAuthOfs::truncate(const char* path,
XrdSfsFileOffset fileOffset,
XrdOucErrInfo& error,
const XrdSecEntity* client,
const char* opaque)
{
int retc = SFS_ERROR;
eos_debug("truncate");
RequestProto* req_proto = utils::GetTruncateRequest(path, fileOffset, error,
client, opaque);
// Compute HMAC for request object
if (!utils::ComputeHMAC(req_proto)) {
eos_err("error HMAC FS truncate");
delete req_proto;
return retc;
}
// Get a socket object from the pool
zmq::socket_t* socket;
mPoolSocket.wait_pop(socket);
if (SendProtoBufRequest(socket, req_proto)) {
ResponseProto* resp_truncate = static_cast(GetResponse(socket));
if (resp_truncate) {
retc = resp_truncate->response();
eos_debug("truncate retc=%i", retc);
ProcessError(resp_truncate, error, path);
delete resp_truncate;
}
}
// Release socket and free memory
gOFS->mPoolSocket.push(socket);
delete req_proto;
return retc;
}
//------------------------------------------------------------------------------
// getStats function - not supported by EOS; answer with a fake ok response
// right here, i.e. do not build and send a request to the real MGM
//------------------------------------------------------------------------------
int
EosAuthOfs::getStats(char* buff, int blen)
{
  eos_debug("getStats");
  return SFS_OK;
}
//------------------------------------------------------------------------------
// Send ProtocolBuffer object using ZMQ
//
// Serializes the protobuf message into a ZMQ message of exactly the right
// size and sends it non-blocking on the given socket.
//
// @param socket ZMQ socket to send on
// @param message protobuf request to serialize and send
//
// @return true if the message was handed to ZMQ, false otherwise
//------------------------------------------------------------------------------
bool
EosAuthOfs::SendProtoBufRequest(zmq::socket_t* socket,
                                google::protobuf::Message* message)
{
  // Send the request
  bool sent = false;
  // ByteSize() was deprecated in favour of ByteSizeLong() in protobuf 3.4
#if GOOGLE_PROTOBUF_VERSION < 3004000
  int msg_size = message->ByteSize();
#else
  int msg_size = message->ByteSizeLong();
#endif
  zmq::message_t request(msg_size);
  // Serialize directly into the ZMQ message buffer (zero-copy)
  google::protobuf::io::ArrayOutputStream aos(request.data(), msg_size);

  if (!message->SerializeToZeroCopyStream(&aos)) {
    eos_err("failed to serialize message");
    return sent;
  }

  // Non-blocking send - the REQ socket pairs this with a later recv
  zmq::send_flags sf = zmq::send_flags::dontwait;
  auto r = socket->send(request, sf);

  if (r.has_value()) {
    sent = true;
  }

  if (!sent) {
    eos_err("unable to send request using zmq");
  }

  return sent;
}
//------------------------------------------------------------------------------
// Get ProtocolBuffer response object using ZMQ
//
// Receives (with retries) a reply on the given socket and parses it into a
// ResponseProto. On timeout or fatal socket error the socket is discarded and
// replaced by a freshly connected one - hence the reference to the pointer.
//
// @param socket reference to the socket pointer; may be replaced on error
//
// @return parsed response object (caller owns it) or nullptr on failure
//------------------------------------------------------------------------------
google::protobuf::Message*
EosAuthOfs::GetResponse(zmq::socket_t*& socket)
{
  // It makes no sense to wait more than 1 min since the XRootD client will
  // timeout by default after 60 seconds.
  int num_retries = 12; // 1 min = 12 * 5 sec
  bool done = false;
  bool reset_socket = false;
  zmq::message_t reply;
  ResponseProto* resp = nullptr;

  try {
    zmq::recv_flags rf = zmq::recv_flags::none;
    zmq::recv_result_t rr;

    do {
      rr = socket->recv(reply, rf);
      --num_retries;

      if (!rr.has_value()) {
        eos_err("ptr_socket=%p, num_retries=%i failed receive", socket,
                num_retries);
      } else {
        done = true;
      }
    } while (!rr.has_value() && (num_retries > 0));
  } catch (zmq::error_t& e) {
    eos_err("socket error: %s", e.what());
    reset_socket = true;
  }

  // We timed out while waiting for a response or a fatal error occurred -
  // then we throw away the socket and create a new one
  if ((num_retries <= 0) || reset_socket) {
    eos_err("discard current socket and create a new one");
    delete socket;
    socket = new zmq::socket_t(*mZmqContext, ZMQ_REQ);
    int timeout_mili = 5000;
    socket->set(zmq::sockopt::rcvtimeo, timeout_mili);
    int socket_linger = 0;
    socket->set(zmq::sockopt::linger, socket_linger);
    std::string endpoint = "inproc://proxyfrontend";

    // Try in a loop to connect to the proxyfrontend as it can take a while for
    // the proxy thread to do the binding, therefore connect can fail
    while (1) {
      try {
        socket->connect(endpoint.c_str());
      } catch (zmq::error_t& err) {
        eos_warning("dealing with connect exception, retrying ...");
        continue;
      }

      break;
    }
  }

  if (done) {
    std::string resp_str = std::string(static_cast<char*>(reply.data()),
                                       reply.size());
    resp = new ResponseProto();
    resp->ParseFromString(resp_str);

    // If the response is a redirect we redirect to our own port number on the
    // target - this allows to failover access from one AUTH to another AUTH
    // daemon in an HA MGM setup
    if (resp->response() == SFS_REDIRECT) {
      if (resp->has_error()) {
        // Mark as collapse redirect - ProcessError() builds the final URL
        // from the host carried in the error message
        resp->set_collapse(true);
      } else {
        eos_err("redirect message without error information - change to error");
        resp->set_response(SFS_ERROR);
      }
    }
  } else {
    eos_err("socket error/timeout during receive");
  }

  return resp;
}
EOSAUTHNAMESPACE_END
================================================
FILE: auth_plugin/EosAuthOfs.hh
================================================
// -----------------------------------------------------------------------------
// File: EosAuthOfs.hh
// Author: Elvin-Alin Sindrilaru - CERN
// -----------------------------------------------------------------------------
/************************************************************************
* EOS - the CERN Disk Storage System *
* Copyright (C) 2013 CERN/Switzerland *
* *
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see .*
************************************************************************/
#ifndef __EOSAUTH_OFS_HH__
#define __EOSAUTH_OFS_HH__
#include <zmq.hpp> // NOTE(review): include target lost in extraction; reconstructed from zmq:: usage below — verify against upstream
#include "Namespace.hh"
#include "common/ConcurrentQueue.hh"
#include <XrdOfs/XrdOfs.hh>        // NOTE(review): include targets lost in extraction;
#include <XrdSys/XrdSysPthread.hh> // reconstructed from XrdOfs base class and XrdSysMutex usage — verify
//! Forward declaration
//! Directory and file object types returned by newDir()/newFile() below
class EosAuthOfsDirectory;
class EosAuthOfsFile;
//! Forward declaration of the protobuf-generated response message used by
//! ProcessError() - avoids pulling the generated proto header into this header
namespace eos
{
namespace auth
{
class ResponseProto;
}
}
//! Forward declaration of the protobuf base message type used by the
//! private send/receive helpers
namespace google
{
namespace protobuf
{
class Message;
}
}
EOSAUTHNAMESPACE_BEGIN
//------------------------------------------------------------------------------
//! Class EosAuthOfs built on top of XrdOfs
/*! Description: The libEosAuthOfs.so is intended to be used as an OFS library
plugin with a vanilla XRootD server. What it does is to connect using ZMQ
sockets to one MGM node (either a master or
a slave MGM). The endpoint is read from the configuration file, by default
it will connect to localhost:1094 !
The EosAuthOfs plugin
then tries to replay all the requests it receives from the clients to the
master MGM node. It does this by marshalling the request and identity of the
client using ProtocolBuffers and sends this request using ZMQ to the configured
MGM node.
There are several tunable parameters for this configuration (auth + MGMs):
AUTH - configuration
====================
- eosauth.mgm - contain the hostname and the
port to which ZMQ will connect so that it can forward
requests and receive responses.
- eosauth.numsockets - once a client wants to send a request the thread
allocated to it in XRootD will require a socket to send the request
to the MGM node. Therefore, we set up a pool of sockets from the
beginning which can be used to send/receive requests/responses.
The default size is 10 sockets.
MGM - configuration
===================
- mgmofs.auththreads - since we now receive requests using ZMQ, we no longer
use the default thread pool from XRootD and we need threads for dealing
with the requests. This parameter sets the thread pool size when starting
the MGM node.
- mgmofs.authport - this is the endpoint where the MGM listens for ZMQ
requests from any EosAuthOfs plugins. This port needs to be opened also
in the firewall.
- mgmofs.localhost true|false - by default the ZMQ endpoint will listen on
all interfaces, but often the front-end will run on the same node and
for security we want only to have localhost connections
In case of a master <=> slave switch the EosAuthOfs plugin adapts
automatically based on the information provided by the slave MGM which
should redirect all clients with write requests to the master node. Care
should be taken when specifying the two endpoints since the switch is done
ONLY IF the redirection HOST matches one of the two endpoints specified in
the configuration of the authentication plugin (namely eosauth.instance).
Once the switch is done all requests, be they read or write, are sent to the
new master MGM node.
*/
//------------------------------------------------------------------------------
class EosAuthOfs: public XrdOfs, public eos::common::LogId
{
  // The directory and file objects need access to the private ZMQ
  // send/receive helpers and the socket pool
  friend class EosAuthOfsDirectory;
  friend class EosAuthOfsFile;

public:
  //--------------------------------------------------------------------------
  //! Constructor
  //--------------------------------------------------------------------------
  EosAuthOfs();

  //--------------------------------------------------------------------------
  //! Destructor
  //--------------------------------------------------------------------------
  virtual ~EosAuthOfs();

  //--------------------------------------------------------------------------
  //! Configure routine - parses the eosauth.* directives from the XRootD
  //! configuration file and sets up the ZMQ context, socket pool and the
  //! proxy thread
  //!
  //! @param error error object for reporting configuration problems
  //! @param envP  environment passed in by the XRootD framework
  //!
  //! @return 0 upon success, non-zero otherwise
  //--------------------------------------------------------------------------
  virtual int Configure(XrdSysError& error, XrdOucEnv* envP);

  //--------------------------------------------------------------------------
  //! Get directory object
  //--------------------------------------------------------------------------
  XrdSfsDirectory* newDir(char* user = 0, int MonID = 0);

  //--------------------------------------------------------------------------
  //! Get file object
  //--------------------------------------------------------------------------
  XrdSfsFile* newFile(char* user = 0, int MonID = 0);

  //--------------------------------------------------------------------------
  //! Stat function
  //--------------------------------------------------------------------------
  int stat(const char* path,
           struct stat* buf,
           XrdOucErrInfo& error,
           const XrdSecEntity* client,
           const char* opaque = 0);

  //--------------------------------------------------------------------------
  //! Stat function to retrieve mode
  //--------------------------------------------------------------------------
  int stat(const char* name,
           mode_t& mode,
           XrdOucErrInfo& out_error,
           const XrdSecEntity* client,
           const char* opaque = 0);

  //--------------------------------------------------------------------------
  //! Execute file system command !!! fsctl !!!
  //--------------------------------------------------------------------------
  int fsctl(const int cmd,
            const char* args,
            XrdOucErrInfo& out_error,
            const XrdSecEntity* client);

  //--------------------------------------------------------------------------
  //! Execute file system command !!! FSctl !!!
  //--------------------------------------------------------------------------
  int FSctl(const int cmd,
            XrdSfsFSctl& args,
            XrdOucErrInfo& error,
            const XrdSecEntity* client = 0);

  //--------------------------------------------------------------------------
  //! Chmod function
  //--------------------------------------------------------------------------
  int chmod(const char* path,
            XrdSfsMode mode,
            XrdOucErrInfo& error,
            const XrdSecEntity* client,
            const char* opaque = 0);

  //--------------------------------------------------------------------------
  //! Chksum function
  //--------------------------------------------------------------------------
  int chksum(csFunc func,
             const char* csName,
             const char* path,
             XrdOucErrInfo& error,
             const XrdSecEntity* client = 0,
             const char* opaque = 0);

  //--------------------------------------------------------------------------
  //! Exists function
  //--------------------------------------------------------------------------
  int exists(const char* path,
             XrdSfsFileExistence& exists_flag,
             XrdOucErrInfo& error,
             const XrdSecEntity* client,
             const char* opaque = 0);

  //--------------------------------------------------------------------------
  //! Create directory
  //--------------------------------------------------------------------------
  int mkdir(const char* dirName,
            XrdSfsMode Mode,
            XrdOucErrInfo& out_error,
            const XrdSecEntity* client,
            const char* opaque = 0);

  //--------------------------------------------------------------------------
  //! Remove directory
  //--------------------------------------------------------------------------
  int remdir(const char* path,
             XrdOucErrInfo& error,
             const XrdSecEntity* client,
             const char* opaque = 0);

  //--------------------------------------------------------------------------
  //! Remove file
  //--------------------------------------------------------------------------
  int rem(const char* path,
          XrdOucErrInfo& error,
          const XrdSecEntity* client,
          const char* opaque = 0);

  //--------------------------------------------------------------------------
  //! Rename file
  //--------------------------------------------------------------------------
  int rename(const char* oldName,
             const char* newName,
             XrdOucErrInfo& error,
             const XrdSecEntity* client,
             const char* opaqueO = 0,
             const char* opaqueN = 0);

  //--------------------------------------------------------------------------
  //! Prepare request
  //--------------------------------------------------------------------------
  int prepare(XrdSfsPrep& pargs,
              XrdOucErrInfo& error,
              const XrdSecEntity* client = 0);

  //--------------------------------------------------------------------------
  //! Truncate file
  //--------------------------------------------------------------------------
  int truncate(const char* path,
               XrdSfsFileOffset fileOffset,
               XrdOucErrInfo& error,
               const XrdSecEntity* client = 0,
               const char* opaque = 0);

  //--------------------------------------------------------------------------
  //! getStats function - fake an ok response HERE i.e. do not build and send
  //! a request to the real MGM
  //--------------------------------------------------------------------------
  int getStats(char* buff, int blen);

  //--------------------------------------------------------------------------
  //! Process a proto error response and configure a collapsing redirection
  //! if requested/possible
  //!
  //! @param resp_func protobuf response received from the MGM
  //! @param error     error object filled in for the XRootD framework
  //! @param path      path of the request being processed
  //--------------------------------------------------------------------------
  void ProcessError(eos::auth::ResponseProto* resp_func, XrdOucErrInfo& error,
                    const char* path);

private:
  pthread_t proxy_tid; ///< id of the proxy thread
  zmq::context_t* mZmqContext; ///< ZMQ context
  zmq::socket_t* mFrontend; ///< proxy socket facing the clients
  XrdSysMutex mMutexMaster; ///< protects access to the backend endpoint
  int mSizePoolSocket; ///< maximum size of the client socket pool
  ///< ZMQ client socket pool (template argument reconstructed from the
  ///< zmq::socket_t* based helpers below - confirm against upstream)
  eos::common::ConcurrentQueue<zmq::socket_t*> mPoolSocket;
  ///< MGM endpoint to which requests are dispatched and the corresponding
  ///< socket (template arguments reconstructed from the member comment -
  ///< confirm against upstream)
  std::pair<std::string, zmq::socket_t*> mBackend;
  std::string mManagerIp; ///< auth ip address
  int mPort; ///< port on which the current auth server runs
  int mCollapsePort; ///< port to which a redirect gets collapsed on
  int mLogLevel; ///< log level value 0 - 7 (LOG_EMERG - LOG_DEBUG)

  //--------------------------------------------------------------------------
  //! Authentication proxy thread which forwards requests from the clients
  //! to the proper MGM instance.
  //--------------------------------------------------------------------------
  void AuthProxyThread();

  //--------------------------------------------------------------------------
  //! Authentication proxy thread startup function
  //!
  //! @param pp pointer to the EosAuthOfs instance
  //--------------------------------------------------------------------------
  static void* StartAuthProxyThread(void* pp);

  //--------------------------------------------------------------------------
  //! Send ProtocolBuffer object using ZMQ
  //!
  //! @param socket ZMQ socket object
  //! @param message object to be sent over the wire
  //!
  //! @return true if object sent successfully, otherwise false
  //--------------------------------------------------------------------------
  bool SendProtoBufRequest(zmq::socket_t* socket,
                           google::protobuf::Message* message);

  //--------------------------------------------------------------------------
  //! Get ProtocolBuffer reply object using ZMQ
  //!
  //! @param socket ZMQ socket object
  //!
  //! @return pointer to received object, the user has the responsibility to
  //!         delete the obtained object
  //--------------------------------------------------------------------------
  google::protobuf::Message* GetResponse(zmq::socket_t*& socket);
};
//------------------------------------------------------------------------------
//! Global OFS object
//------------------------------------------------------------------------------
extern EosAuthOfs* gOFS;
EOSAUTHNAMESPACE_END
#endif //__EOSAUTH_OFS_HH__
================================================
FILE: auth_plugin/EosAuthOfsDirectory.cc
================================================
//------------------------------------------------------------------------------
// File: EosAuthOfsDirectory.cc
// Author: Elvin-Alin Sindrilaru - CERN
//------------------------------------------------------------------------------
/************************************************************************
* EOS - the CERN Disk Storage System *
* Copyright (C) 2013 CERN/Switzerland *
* *
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see