Repository: cern-eos/eos Branch: master Commit: a94b0f29148b Files: 2385 Total size: 100.5 MB Directory structure: gitextract_gne0gw27/ ├── .clang-format ├── .clang-tidy ├── .codeclimate.yml ├── .ctest/ │ └── config.cmake ├── .git-blame-ignore-revs ├── .gitignore ├── .gitlab-ci.yml ├── .gitmodules ├── .ignore ├── .mailmap ├── .pre-commit-config.yaml ├── AUDIT.md ├── ApMon/ │ ├── AUTHORS │ ├── COPYING │ ├── ChangeLog │ ├── INSTALL │ ├── Makefile │ ├── NEWS │ ├── README │ ├── eos-apmon.spec │ ├── etc/ │ │ └── logrotate.d/ │ │ └── eosapmond │ ├── jenkins-build.sh │ ├── maketar.sh │ ├── opt/ │ │ └── eos/ │ │ └── apmon/ │ │ └── eosapmond │ ├── perl/ │ │ └── ApMon/ │ │ ├── ApMon/ │ │ │ ├── BgMonitor.pm │ │ │ ├── Common.pm │ │ │ ├── ConfigLoader.pm │ │ │ ├── ProcInfo.pm │ │ │ └── XDRUtils.pm │ │ ├── ApMon.pm │ │ ├── sendToML.sh │ │ └── servMon.sh │ ├── run.sh │ └── usr/ │ └── lib/ │ └── systemd/ │ └── system/ │ └── eosapmond.service ├── CMakeLists.txt ├── CTestConfig.cmake ├── License ├── README.md ├── archive/ │ ├── CMakeLists.txt │ ├── eosarch/ │ │ ├── __init__.py │ │ ├── archivefile.py │ │ ├── asynchandler.py │ │ ├── configuration.py │ │ ├── exceptions.py │ │ ├── processinfo.py │ │ ├── tests/ │ │ │ ├── __init__.py │ │ │ ├── env.py │ │ │ └── test_archivefile.py │ │ ├── transfer.py │ │ └── utils.py │ ├── eosarch_reconstruct.py │ ├── eosarch_run.py │ ├── eosarchived.conf │ ├── eosarchived.py │ ├── eosarchived.service │ ├── eosarchived_env.sysconfig │ └── opt-eos-xrootd.pth ├── auth_plugin/ │ ├── CMakeLists.txt │ ├── EosAuthOfs.cc │ ├── EosAuthOfs.hh │ ├── EosAuthOfsDirectory.cc │ ├── EosAuthOfsDirectory.hh │ ├── EosAuthOfsFile.cc │ ├── EosAuthOfsFile.hh │ ├── Namespace.hh │ ├── ProtoUtils.cc │ ├── ProtoUtils.hh │ └── proto/ │ ├── Chksum.proto │ ├── Chmod.proto │ ├── DirClose.proto │ ├── DirFname.proto │ ├── DirOpen.proto │ ├── DirRead.proto │ ├── Exists.proto │ ├── FS_ctl.proto │ ├── FileClose.proto │ ├── FileFname.proto │ ├── FileOpen.proto │ ├── FileRead.proto │ ├── 
FileStat.proto │ ├── FileWrite.proto │ ├── Fsctl.proto │ ├── GetStats.proto │ ├── Mkdir.proto │ ├── Prepare.proto │ ├── Rem.proto │ ├── Remdir.proto │ ├── Rename.proto │ ├── Request.proto │ ├── Response.proto │ ├── Stat.proto │ ├── Truncate.proto │ ├── XrdOucErrInfo.proto │ ├── XrdSecEntity.proto │ ├── XrdSfsFSctl.proto │ └── XrdSfsPrep.proto ├── client/ │ ├── CMakeLists.txt │ ├── Namespace.hh │ └── grpc/ │ ├── Find.cc │ ├── GrpcClient.cc │ ├── GrpcClient.hh │ ├── GrpcClientAuthProcessor.hh │ ├── Insert.cc │ ├── Md.cc │ ├── Ns.cc │ ├── NsStat.cc │ └── Ping.cc ├── cmake/ │ ├── CPUArchFlags.cmake │ ├── DownloadProject.CMakeLists.cmake.in │ ├── DownloadProject.cmake │ ├── EosCompileFlags.cmake │ ├── EosCoverage.cmake │ ├── EosFindLibs.cmake │ ├── EosGraphviz.cmake │ ├── EosOSDefaults.cmake │ ├── EosSummary.cmake │ ├── EosTui.cmake │ ├── EosTuiInstall.cmake.in │ ├── EosUtils.cmake │ ├── FindActiveMQCPP.cmake │ ├── FindAtomic.cmake │ ├── FindEosGrpcGateway.cmake │ ├── FindGRPC.cmake │ ├── FindGlobus.cmake │ ├── FindLibevent.cmake │ ├── FindProtobuf3.cmake │ ├── FindPythonSitePkg.cmake │ ├── FindRocksDB.cmake │ ├── FindScitokens.cmake │ ├── FindSnappy.cmake │ ├── FindSparseHash.cmake │ ├── FindSphinx.cmake │ ├── FindXRootD.cmake │ ├── FindZMQ.cmake │ ├── Findabsl.cmake │ ├── Findbz2.cmake │ ├── Finddavix.cmake │ ├── Findeosfolly.cmake │ ├── Findfuse.cmake │ ├── Findfuse3.cmake │ ├── Findglibc.cmake │ ├── Findhelp2man.cmake │ ├── Findisal.cmake │ ├── Findisal_crypto.cmake │ ├── Findjemalloc.cmake │ ├── Findjsoncpp.cmake │ ├── Findkrb5.cmake │ ├── Findldap.cmake │ ├── Findlibbfd.cmake │ ├── Findlibproc2.cmake │ ├── Findlibunwind.cmake │ ├── Findlz4.cmake │ ├── Findncurses.cmake │ ├── Findnfs.cmake │ ├── Findprocps.cmake │ ├── Findreadline.cmake │ ├── Finduuid.cmake │ ├── Findxfs.cmake │ ├── Findxxhash.cmake │ ├── Findzstd.cmake │ ├── cmake_uninstall.cmake.in │ └── config_spec.cmake.in ├── common/ │ ├── Assert.hh │ ├── AssistedThread.hh │ ├── Audit.cc │ ├── Audit.hh │ ├── 
BehaviourConfig.cc │ ├── BehaviourConfig.hh │ ├── BufferManager.cc │ ├── BufferManager.hh │ ├── CLI11.hpp │ ├── CMakeLists.txt │ ├── CloExec.hh │ ├── ClockGetTime.cc │ ├── ClockGetTime.hh │ ├── CommentLog.cc │ ├── CommentLog.hh │ ├── ConcurrentQueue.hh │ ├── Config.cc │ ├── Config.hh │ ├── Constants.hh │ ├── CopyProcess.hh │ ├── Counter.hh │ ├── CtaCommon.hh │ ├── DBG.hh │ ├── Definitions.hh │ ├── EosLayoutPrint.cc │ ├── ErrnoToString.cc │ ├── ErrnoToString.hh │ ├── ExpiryCache.hh │ ├── FileId.hh │ ├── FileMap.hh │ ├── FileSystem.cc │ ├── FileSystem.hh │ ├── Fmd.cc │ ├── Fmd.hh │ ├── FutureWrapper.hh │ ├── Glob.cc │ ├── Glob.hh │ ├── IRWMutex.hh │ ├── InodeTranslator.hh │ ├── InstanceName.cc │ ├── InstanceName.hh │ ├── IntervalStopwatch.cc │ ├── IntervalStopwatch.hh │ ├── IoPipe.hh │ ├── JeMallocHandler.cc │ ├── JeMallocHandler.hh │ ├── LOGGING.md │ ├── LRU.hh │ ├── LayoutId.hh │ ├── LinuxFds.hh │ ├── LinuxMemConsumption.hh │ ├── LinuxStat.hh │ ├── LinuxTotalMem.hh │ ├── Locators.cc │ ├── Locators.hh │ ├── Logging.cc │ ├── Logging.hh │ ├── Macros.hh │ ├── Mapping.cc │ ├── Mapping.hh │ ├── Murmur3.hh │ ├── MutexLatencyWatcher.cc │ ├── MutexLatencyWatcher.hh │ ├── Namespace.hh │ ├── OAuth.cc │ ├── OAuth.hh │ ├── ObserverMgr.hh │ ├── Parallel.hh │ ├── ParseUtils.hh │ ├── PasswordHandler.hh │ ├── Path.hh │ ├── PthreadRWMutex.cc │ ├── PthreadRWMutex.hh │ ├── QuarkDBHealthParser.hh │ ├── RWMutex.cc │ ├── RWMutex.hh │ ├── RateLimit.cc │ ├── RateLimit.hh │ ├── RegexWrapper.cc │ ├── RegexWrapper.hh │ ├── Report.cc │ ├── Report.hh │ ├── SecEntity.hh │ ├── ShardedCache.hh │ ├── SharedCallbackList.hh │ ├── SharedMutex.cc │ ├── SharedMutex.hh │ ├── ShellCmd.cc │ ├── ShellCmd.hh │ ├── ShellExecutor.cc │ ├── ShellExecutor.hh │ ├── StackTrace.hh │ ├── StacktraceHere.cc │ ├── StacktraceHere.hh │ ├── Statfs.cc │ ├── Statfs.hh │ ├── Statistics.hh │ ├── Status.hh │ ├── SteadyClock.hh │ ├── Strerror_r_wrapper.cc │ ├── Strerror_r_wrapper.hh │ ├── StringConversion.cc │ ├── 
StringConversion.hh │ ├── StringSplit.hh │ ├── StringTokenizer.cc │ ├── StringTokenizer.hh │ ├── StringUtils.hh │ ├── SymKeys.cc │ ├── SymKeys.hh │ ├── SyncAll.hh │ ├── SystemClock.hh │ ├── ThreadPool.hh │ ├── Timing.hh │ ├── UnixGroupsFetcher.cc │ ├── UnixGroupsFetcher.hh │ ├── Untraceable.hh │ ├── UriCapCipher.cc │ ├── UriCapCipher.hh │ ├── Utils.cc │ ├── Utils.hh │ ├── VirtualIdentity.cc │ ├── VirtualIdentity.hh │ ├── WFEClient.hh │ ├── WaitInterval.hh │ ├── WebNotify.cc │ ├── WebNotify.hh │ ├── XattrCompat.hh │ ├── XrdConnPool.cc │ ├── XrdConnPool.hh │ ├── XrdErrorMap.cc │ ├── XrdErrorMap.hh │ ├── async/ │ │ ├── ExecutorMgr.hh │ │ └── OpaqueFuture.hh │ ├── blake3/ │ │ ├── README.md │ │ ├── blake3.c │ │ ├── blake3.h │ │ ├── blake3_avx2.c │ │ ├── blake3_avx2_x86-64_unix.S │ │ ├── blake3_avx512.c │ │ ├── blake3_avx512_x86-64_unix.S │ │ ├── blake3_dispatch.c │ │ ├── blake3_impl.h │ │ ├── blake3_neon.c │ │ ├── blake3_portable.c │ │ ├── blake3_sse2.c │ │ ├── blake3_sse2_x86-64_unix.S │ │ ├── blake3_sse41.c │ │ ├── blake3_sse41_x86-64_unix.S │ │ └── main.c │ ├── concurrency/ │ │ ├── AlignMacros.hh │ │ ├── AlignedArray.hh │ │ ├── AtomicUniquePtr.h │ │ ├── RCULite.hh │ │ ├── ThreadEpochCounter.cc │ │ └── ThreadEpochCounter.hh │ ├── config/ │ │ ├── ConfigParsing.cc │ │ ├── ConfigParsing.hh │ │ └── ConfigStore.hh │ ├── crc32c/ │ │ ├── crc32c.cc │ │ ├── crc32c.h │ │ ├── crc32ctables.cc │ │ └── crc32ctables.h │ ├── doxygen.hh │ ├── eos_cta_pb/ │ │ ├── CMakeLists.txt │ │ └── EosCtaAlertHandler.hh │ ├── exception/ │ │ ├── Exception.cc │ │ └── Exception.hh │ ├── highwayhash/ │ │ ├── arch_specific.h │ │ ├── c_bindings.h │ │ ├── compiler_specific.h │ │ ├── data_parallel.h │ │ ├── endianess.h │ │ ├── hh_avx2.h │ │ ├── hh_buffer.h │ │ ├── hh_neon.h │ │ ├── hh_portable.h │ │ ├── hh_sse41.h │ │ ├── hh_types.h │ │ ├── hh_vsx.h │ │ ├── highwayhash.h │ │ ├── highwayhash_target.h │ │ ├── highwayhash_test_target.h │ │ ├── iaca.h │ │ ├── instruction_sets.h │ │ ├── load3.h │ │ ├── 
nanobenchmark.h │ │ ├── os_mac.h │ │ ├── os_specific.h │ │ ├── profiler.h │ │ ├── robust_statistics.h │ │ ├── scalar.h │ │ ├── scalar_sip_tree_hash.h │ │ ├── sip_hash.h │ │ ├── sip_tree_hash.h │ │ ├── state_helpers.h │ │ ├── tsc_timer.h │ │ ├── vector128.h │ │ ├── vector256.h │ │ ├── vector_neon.h │ │ └── vector_test_target.h │ ├── hopscotch_hash.hh │ ├── hopscotch_map.hh │ ├── http/ │ │ ├── HttpHandler.hh │ │ ├── HttpRequest.cc │ │ ├── HttpRequest.hh │ │ ├── HttpResponse.cc │ │ ├── HttpResponse.hh │ │ ├── HttpServer.cc │ │ ├── HttpServer.hh │ │ ├── MimeTypes.hh │ │ ├── OwnCloud.hh │ │ ├── PlainHttpResponse.hh │ │ ├── ProtocolHandler.hh │ │ ├── ProtocolHandlerFactory.hh │ │ └── s3/ │ │ ├── S3Handler.cc │ │ ├── S3Handler.hh │ │ └── S3Response.hh │ ├── json/ │ │ ├── Json.hh │ │ ├── JsonCppJsonifier.hh │ │ ├── Jsonifiable.hh │ │ └── Jsonifier.hh │ ├── mq/ │ │ ├── FsChangeListener.cc │ │ ├── FsChangeListener.hh │ │ ├── GlobalConfigChangeListener.cc │ │ ├── GlobalConfigChangeListener.hh │ │ ├── LocalHash.cc │ │ ├── LocalHash.hh │ │ ├── MessagingRealm.cc │ │ ├── MessagingRealm.hh │ │ ├── Namespace.hh │ │ ├── QdbListener.cc │ │ ├── QdbListener.hh │ │ ├── SharedDequeProvider.cc │ │ ├── SharedDequeProvider.hh │ │ ├── SharedHashProvider.cc │ │ ├── SharedHashProvider.hh │ │ ├── SharedHashWrapper.cc │ │ ├── SharedHashWrapper.hh │ │ └── XrdMqTiming.hh │ ├── mutextest/ │ │ └── RWMutexTest.cc │ ├── plugin_manager/ │ │ ├── DynamicLibrary.cc │ │ ├── DynamicLibrary.hh │ │ ├── Plugin.hh │ │ ├── PluginManager.cc │ │ └── PluginManager.hh │ ├── shaping/ │ │ ├── IoStatsKey.hh │ │ └── SlidingWindowStats.hh │ ├── shellexectest/ │ │ └── shell_exec_test.cc │ ├── stringencoders/ │ │ ├── modp_numtoa.c │ │ └── modp_numtoa.h │ ├── table_formatter/ │ │ ├── TableCell.cc │ │ ├── TableCell.hh │ │ ├── TableFormatterBase.cc │ │ ├── TableFormatterBase.hh │ │ └── TableFormatting.hh │ ├── thread_id.hh │ ├── token/ │ │ ├── EosTok.cc │ │ ├── EosTok.hh │ │ ├── SciToken.cc │ │ ├── SciToken.hh │ │ ├── 
Token.hh │ │ ├── eosscitokenmodule.c │ │ ├── example/ │ │ │ └── eossci.py │ │ ├── scitoken.h │ │ └── setup.py │ ├── ulib/ │ │ ├── hash_align.h │ │ ├── hash_align_prot.h │ │ ├── ulib.c │ │ ├── util_algo.h │ │ └── util_class.h │ └── utils/ │ ├── BackOffInvoker.hh │ ├── BindArguments.hh │ ├── ContainerUtils.hh │ ├── RandUtils.hh │ ├── TypeTraits.hh │ ├── XrdUtils.cc │ └── XrdUtils.hh ├── console/ │ ├── CMakeLists.txt │ ├── CommandFramework.cc │ ├── CommandFramework.hh │ ├── ConsoleArgParser.cc │ ├── ConsoleArgParser.hh │ ├── ConsoleCliCommand.cc │ ├── ConsoleCliCommand.hh │ ├── ConsoleCompletion.cc │ ├── ConsoleCompletion.hh │ ├── ConsoleMain.cc │ ├── ConsoleMain.hh │ ├── ConsoleMainExecutable.cc │ ├── GlobalOptions.hh │ ├── ICommand.hh │ ├── README.md │ ├── RegexUtil.cc │ ├── RegexUtil.hh │ ├── commands/ │ │ ├── HealthCommand.cc │ │ ├── HealthCommand.hh │ │ ├── coms/ │ │ │ └── unused/ │ │ │ ├── com_access.cc │ │ │ ├── com_accounting.cc │ │ │ ├── com_archive.cc │ │ │ ├── com_attr.cc │ │ │ ├── com_backup.cc │ │ │ ├── com_cd.cc │ │ │ ├── com_chmod.cc │ │ │ ├── com_chown.cc │ │ │ ├── com_clear.cc │ │ │ ├── com_cp.cc │ │ │ ├── com_daemon.cc │ │ │ ├── com_debug.cc │ │ │ ├── com_du.cc │ │ │ ├── com_evict.cc │ │ │ ├── com_file.cc │ │ │ ├── com_fuse.cc │ │ │ ├── com_fusex.cc │ │ │ ├── com_geosched.cc │ │ │ ├── com_health.cc │ │ │ ├── com_info.cc │ │ │ ├── com_inspector.cc │ │ │ ├── com_json.cc │ │ │ ├── com_license.cc │ │ │ ├── com_ln.cc │ │ │ ├── com_map.cc │ │ │ ├── com_member.cc │ │ │ ├── com_mkdir.cc │ │ │ ├── com_motd.cc │ │ │ ├── com_mv.cc │ │ │ ├── com_old_find.cc │ │ │ ├── com_print.cc │ │ │ ├── com_proto_access.cc │ │ │ ├── com_proto_acl.cc │ │ │ ├── com_proto_config.cc │ │ │ ├── com_proto_convert.cc │ │ │ ├── com_proto_debug.cc │ │ │ ├── com_proto_devices.cc │ │ │ ├── com_proto_df.cc │ │ │ ├── com_proto_find.cc │ │ │ ├── com_proto_fs.cc │ │ │ ├── com_proto_fsck.cc │ │ │ ├── com_proto_group.cc │ │ │ ├── com_proto_io.cc │ │ │ ├── com_proto_node.cc │ │ │ ├── 
com_proto_ns.cc │ │ │ ├── com_proto_quota.cc │ │ │ ├── com_proto_recycle.cc │ │ │ ├── com_proto_register.cc │ │ │ ├── com_proto_rm.cc │ │ │ ├── com_proto_route.cc │ │ │ ├── com_proto_sched.cc │ │ │ ├── com_proto_space.cc │ │ │ ├── com_proto_token.cc │ │ │ ├── com_pwd.cc │ │ │ ├── com_quit.cc │ │ │ ├── com_quota.cc │ │ │ ├── com_rclone.cc │ │ │ ├── com_reconnect.cc │ │ │ ├── com_report.cc │ │ │ ├── com_rm.cc │ │ │ ├── com_rmdir.cc │ │ │ ├── com_role.cc │ │ │ ├── com_rtlog.cc │ │ │ ├── com_scitoken.cc │ │ │ ├── com_silent.cc │ │ │ ├── com_squash.cc │ │ │ ├── com_stat.cc │ │ │ ├── com_status.cc │ │ │ ├── com_test.cc │ │ │ ├── com_timing.cc │ │ │ ├── com_touch.cc │ │ │ ├── com_tracker.cc │ │ │ ├── com_version.cc │ │ │ ├── com_vid.cc │ │ │ ├── com_who.cc │ │ │ └── com_whoami.cc │ │ ├── helpers/ │ │ │ ├── AclHelper.cc │ │ │ ├── AclHelper.hh │ │ │ ├── FsHelper.cc │ │ │ ├── FsHelper.hh │ │ │ ├── FsckHelper.cc │ │ │ ├── FsckHelper.hh │ │ │ ├── ICmdHelper.cc │ │ │ ├── ICmdHelper.hh │ │ │ ├── NewfindHelper.cc │ │ │ ├── NewfindHelper.hh │ │ │ ├── NodeHelper.cc │ │ │ ├── NodeHelper.hh │ │ │ ├── RecycleHelper.cc │ │ │ ├── RecycleHelper.hh │ │ │ ├── TokenHelper.cc │ │ │ ├── TokenHelper.hh │ │ │ └── jwk_generator/ │ │ │ ├── c_resource.hpp │ │ │ ├── errors.hpp │ │ │ ├── jwk_generator.hpp │ │ │ ├── keyspecs/ │ │ │ │ ├── ec_key.hpp │ │ │ │ └── rsa_key.hpp │ │ │ ├── libs/ │ │ │ │ ├── base64_url.hpp │ │ │ │ ├── json.hpp │ │ │ │ └── uuid.hpp │ │ │ └── openssl_wrapper.hpp │ │ └── native/ │ │ ├── CoreNativeCommands.cc │ │ ├── LegacySymbols.cc │ │ ├── access-proto-native.cc │ │ ├── accounting-cmd-native.cc │ │ ├── acl-proto-native.cc │ │ ├── archive-cmd-native.cc │ │ ├── attr-cmd-native.cc │ │ ├── backup-cmd-native.cc │ │ ├── cat-com-native.cc │ │ ├── cd-cmd-native.cc │ │ ├── chmod-cmd-native.cc │ │ ├── chown-cmd-native.cc │ │ ├── clear-cmd-native.cc │ │ ├── config-proto-native.cc │ │ ├── convert-proto-native.cc │ │ ├── cp-cmd-native.cc │ │ ├── daemon-native.cc │ │ ├── debug-cmd-native.cc 
│ │ ├── devices-proto-native.cc │ │ ├── df-proto-native.cc │ │ ├── du-native.cc │ │ ├── du-proto-native.cc │ │ ├── evict-cmd-native.cc │ │ ├── file-cmd-native.cc │ │ ├── fileinfo-alias.cc │ │ ├── find-proto-native.cc │ │ ├── fs-proto-native.cc │ │ ├── fsck-proto-native.cc │ │ ├── fuse-native.cc │ │ ├── fusex-cmd-native.cc │ │ ├── geosched-cmd-native.cc │ │ ├── group-proto-native.cc │ │ ├── health-native.cc │ │ ├── info-alias.cc │ │ ├── info-native.cc │ │ ├── inspector-proto-native.cc │ │ ├── io-proto-native.cc │ │ ├── license-native.cc │ │ ├── ln-cmd-native.cc │ │ ├── ls-cmd-native.cc │ │ ├── ls-compat.cc │ │ ├── map-cmd-native.cc │ │ ├── member-cmd-native.cc │ │ ├── mkdir-cmd-native.cc │ │ ├── motd-cmd-native.cc │ │ ├── mv-alias.cc │ │ ├── node-proto-native.cc │ │ ├── ns-proto-native.cc │ │ ├── oldfind-cmd-native.cc │ │ ├── pwd-native.cc │ │ ├── quota-proto-native.cc │ │ ├── rclone-cmd-native.cc │ │ ├── reconnect-native.cc │ │ ├── recycle-proto-native.cc │ │ ├── register-proto-native.cc │ │ ├── report-native.cc │ │ ├── rm-proto-native.cc │ │ ├── rmdir-cmd-native.cc │ │ ├── role-native.cc │ │ ├── route-proto-native.cc │ │ ├── rtlog-cmd-native.cc │ │ ├── sched-proto-native.cc │ │ ├── scitoken-native.cc │ │ ├── space-proto-native.cc │ │ ├── squash-cmd-native.cc │ │ ├── stat-native.cc │ │ ├── status-native.cc │ │ ├── test-native.cc │ │ ├── token-proto-native.cc │ │ ├── touch-cmd-native.cc │ │ ├── tracker-proto-native.cc │ │ ├── tui-native.cc │ │ ├── version-cmd-native.cc │ │ ├── vid-cmd-native.cc │ │ ├── who-cmd-native.cc │ │ └── whoami-cmd-native.cc │ ├── eos-iam-mapfile │ ├── eosadmin │ └── eosreport ├── coverage/ │ └── eoslcov.rc ├── debian/ │ ├── compat │ ├── control.template │ ├── copyright │ ├── eos-client.install │ ├── eos-fusex.install │ ├── eos-fusex.postinst │ ├── eos-test.install │ ├── eos-testkeytab.install │ ├── eos-testkeytab.postinst │ ├── rules │ └── source/ │ └── format ├── doc/ │ ├── _themes/ │ │ ├── solar_theme/ │ │ │ ├── __init__.py │ │ │ ├── 
layout.html │ │ │ ├── static/ │ │ │ │ ├── solar.css │ │ │ │ └── solarized-dark.css │ │ │ └── theme.conf │ │ └── sphinx13/ │ │ ├── layout.html │ │ └── theme.conf │ ├── citrine/ │ │ ├── Doxyfile │ │ ├── backup_clone.rst │ │ ├── backup_clone.txt │ │ ├── clicommands/ │ │ │ ├── accounting.rst │ │ │ ├── acl.rst │ │ │ ├── archive.rst │ │ │ ├── attr.rst │ │ │ ├── backup.rst │ │ │ ├── cd.rst │ │ │ ├── chmod.rst │ │ │ ├── chown.rst │ │ │ ├── clear.rst │ │ │ ├── config.rst │ │ │ ├── console.rst │ │ │ ├── cp.rst │ │ │ ├── debug.rst │ │ │ ├── evict.rst │ │ │ ├── exit.rst │ │ │ ├── file.rst │ │ │ ├── fileinfo.rst │ │ │ ├── find.rst │ │ │ ├── fs.rst │ │ │ ├── fsck.rst │ │ │ ├── fuse.rst │ │ │ ├── fusex.rst │ │ │ ├── geosched.rst │ │ │ ├── group.rst │ │ │ ├── health.rst │ │ │ ├── help.rst │ │ │ ├── info.rst │ │ │ ├── inspector.rst │ │ │ ├── io.rst │ │ │ ├── json.rst │ │ │ ├── license.rst │ │ │ ├── ln.rst │ │ │ ├── ls.rst │ │ │ ├── map.rst │ │ │ ├── member.rst │ │ │ ├── mkdir.rst │ │ │ ├── motd.rst │ │ │ ├── mv.rst │ │ │ ├── newfind.rst │ │ │ ├── node.rst │ │ │ ├── ns.rst │ │ │ ├── pointq.rst │ │ │ ├── pwd.rst │ │ │ ├── question.rst │ │ │ ├── quit.rst │ │ │ ├── quota.rst │ │ │ ├── reconnect.rst │ │ │ ├── recycle.rst │ │ │ ├── rm.rst │ │ │ ├── rmdir.rst │ │ │ ├── role.rst │ │ │ ├── route.rst │ │ │ ├── rtlog.rst │ │ │ ├── silent.rst │ │ │ ├── space.rst │ │ │ ├── squash.rst │ │ │ ├── stat.rst │ │ │ ├── test.rst │ │ │ ├── timing.rst │ │ │ ├── token.rst │ │ │ ├── touch.rst │ │ │ ├── tracker.rst │ │ │ ├── transfer.rst │ │ │ ├── version.rst │ │ │ ├── vid.rst │ │ │ ├── who.rst │ │ │ └── whoami.rst │ │ ├── clicommands.rst │ │ ├── conf.py │ │ ├── configuration/ │ │ │ ├── archive.rst │ │ │ ├── balancing.rst │ │ │ ├── converter.rst │ │ │ ├── converter_engine.rst │ │ │ ├── draining.rst │ │ │ ├── egi.rst │ │ │ ├── fsck.rst │ │ │ ├── fuse.rst │ │ │ ├── fusex.rst │ │ │ ├── geobalancer.rst │ │ │ ├── geoscheduling.rst │ │ │ ├── geotags.rst │ │ │ ├── groupbalancer.rst │ │ │ ├── groupdrainer.rst │ │ │ 
├── http.rst │ │ │ ├── http_tpc.rst │ │ │ ├── import.rst │ │ │ ├── inspector.rst │ │ │ ├── kinetic.rst │ │ │ ├── logicalpath.rst │ │ │ ├── lru.rst │ │ │ ├── master.rst │ │ │ ├── master_quarkdb.rst │ │ │ ├── namespace.rst │ │ │ ├── permission.rst │ │ │ ├── proxys.rst │ │ │ ├── qos.rst │ │ │ ├── quarkdb.rst │ │ │ ├── quota.rst │ │ │ ├── recyclebin.rst │ │ │ ├── route.rst │ │ │ ├── s3.rst │ │ │ ├── scheduler.rst │ │ │ ├── tracker.rst │ │ │ ├── transfer.rst │ │ │ ├── tty.rst │ │ │ └── wfe.rst │ │ ├── configuration.rst │ │ ├── contents.rst │ │ ├── develop.rst │ │ ├── generate_docs.py │ │ ├── index.rst │ │ ├── install.rst │ │ ├── intro.rst │ │ ├── quickstart/ │ │ │ ├── admin/ │ │ │ │ ├── configure.rst │ │ │ │ └── krb5.rst │ │ │ ├── boxed.rst │ │ │ ├── client/ │ │ │ │ └── configure.rst │ │ │ ├── docker_image.rst │ │ │ ├── install.rst │ │ │ ├── kubernetes.rst │ │ │ ├── ns_quarkdb.rst │ │ │ ├── setup_repo.rst │ │ │ ├── uboxed.rst │ │ │ ├── ubuntu.rst │ │ │ └── update_eos4to5.rst │ │ ├── quickstart.rst │ │ ├── releases/ │ │ │ ├── amber.rst │ │ │ ├── beryl-release.rst │ │ │ ├── beryl.rst │ │ │ ├── citrine-release.rst │ │ │ ├── citrine.rst │ │ │ ├── diopside-release.rst │ │ │ └── diopside.rst │ │ ├── releases.rst │ │ ├── restapi/ │ │ │ ├── fileinfo.rst │ │ │ ├── format.rst │ │ │ ├── fs.rst │ │ │ ├── group.rst │ │ │ ├── grpc.rst │ │ │ ├── node.rst │ │ │ ├── ns.rst │ │ │ ├── putrange.rst │ │ │ ├── space.rst │ │ │ ├── version.rst │ │ │ └── who.rst │ │ ├── restapi.rst │ │ ├── taperestapi/ │ │ │ └── configuration.rst │ │ ├── taperestapi.rst │ │ ├── using/ │ │ │ ├── archive.rst │ │ │ ├── attributelocks.rst │ │ │ ├── eos_services.rst │ │ │ ├── fusex.rst │ │ │ ├── oauth2.rst │ │ │ ├── policies.rst │ │ │ ├── priorities.rst │ │ │ ├── rain.rst │ │ │ ├── reports.rst │ │ │ ├── sharedfs.rst │ │ │ ├── squashfs.rst │ │ │ ├── systemd.rst │ │ │ ├── tokens.rst │ │ │ └── versions.rst │ │ └── using.rst │ └── diopside/ │ ├── architecture/ │ │ └── index.rst │ ├── blog/ │ │ └── features.rst │ ├── 
conf.py │ ├── configuration.rst │ ├── faq/ │ │ ├── exotic.rst │ │ └── index.rst │ ├── index.rst │ ├── introduction/ │ │ └── index.rst │ ├── manual/ │ │ ├── configuration.rst │ │ ├── develop.rst │ │ ├── egi.rst │ │ ├── formats.rst │ │ ├── getting-started.rst │ │ ├── hardware-installation.rst │ │ ├── index.rst │ │ ├── interfaces.rst │ │ ├── microservices.rst │ │ ├── protocols.rst │ │ └── using.rst │ ├── my-changes.patch │ └── releases/ │ ├── #diopside-release.rst# │ ├── 5.4.0/ │ │ └── recycle_bin_config.rst │ ├── amber.rst │ ├── beryl-release.rst │ ├── beryl.rst │ ├── citrine-release.rst │ ├── citrine.rst │ ├── diopside-release.rst │ ├── diopside.rst │ └── index.rst ├── elrepopackage.spec ├── eos.spec.in ├── fst/ │ ├── CMakeLists.txt │ ├── Config.cc │ ├── Config.hh │ ├── Deletion.hh │ ├── Health.cc │ ├── Health.hh │ ├── Load.cc │ ├── Load.hh │ ├── Namespace.hh │ ├── ScanDir.cc │ ├── ScanDir.hh │ ├── Verify.hh │ ├── XrdFstOfs.cc │ ├── XrdFstOfs.hh │ ├── XrdFstOfsFile.cc │ ├── XrdFstOfsFile.hh │ ├── XrdFstOss.cc │ ├── XrdFstOss.hh │ ├── XrdFstOssFile.cc │ ├── XrdFstOssFile.hh │ ├── checksum/ │ │ ├── Adler.cc │ │ ├── Adler.hh │ │ ├── BLAKE3.hh │ │ ├── CRC32.hh │ │ ├── CRC32C.hh │ │ ├── CRC64.hh │ │ ├── CheckSum.cc │ │ ├── CheckSum.hh │ │ ├── ChecksumGroup.hh │ │ ├── ChecksumPlugins.hh │ │ ├── HWH64.hh │ │ ├── MD5.hh │ │ ├── SHA1.hh │ │ ├── SHA256.hh │ │ ├── XXHASH64.hh │ │ └── cycletimer.h │ ├── eoscp.cc │ ├── filemd/ │ │ ├── FmdAttr.cc │ │ ├── FmdAttr.hh │ │ ├── FmdHandler.cc │ │ ├── FmdHandler.hh │ │ ├── FmdMgm.cc │ │ └── FmdMgm.hh │ ├── http/ │ │ ├── HttpHandler.cc │ │ ├── HttpHandler.hh │ │ ├── HttpHandlerFstFileCache.cc │ │ ├── HttpHandlerFstFileCache.hh │ │ ├── HttpServer.cc │ │ ├── HttpServer.hh │ │ ├── ProtocolHandlerFactory.hh │ │ ├── s3/ │ │ │ ├── S3Handler.cc │ │ │ └── S3Handler.hh │ │ └── xrdhttp/ │ │ ├── EosFstHttpHandler.cc │ │ ├── EosFstHttpHandler.hh │ │ └── README.md │ ├── io/ │ │ ├── AsyncMetaHandler.cc │ │ ├── AsyncMetaHandler.hh │ │ ├── 
ChunkHandler.cc │ │ ├── ChunkHandler.hh │ │ ├── FileIo.cc │ │ ├── FileIo.hh │ │ ├── FileIoPlugin-Server.cc │ │ ├── FileIoPlugin.cc │ │ ├── FileIoPlugin.hh │ │ ├── FileIoPluginCommon.hh │ │ ├── SimpleHandler.cc │ │ ├── SimpleHandler.hh │ │ ├── VectChunkHandler.cc │ │ ├── VectChunkHandler.hh │ │ ├── davix/ │ │ │ ├── DavixIo.cc │ │ │ └── DavixIo.hh │ │ ├── local/ │ │ │ ├── FsIo.cc │ │ │ ├── FsIo.hh │ │ │ ├── LocalIo.cc │ │ │ └── LocalIo.hh │ │ ├── nfs/ │ │ │ ├── NfsIo.cc │ │ │ └── NfsIo.hh │ │ └── xrd/ │ │ ├── ResponseCollector.cc │ │ ├── ResponseCollector.hh │ │ ├── XrdIo.cc │ │ └── XrdIo.hh │ ├── layout/ │ │ ├── HeaderCRC.cc │ │ ├── HeaderCRC.hh │ │ ├── Layout.cc │ │ ├── Layout.hh │ │ ├── LayoutPlugin.cc │ │ ├── LayoutPlugin.hh │ │ ├── PlainLayout.cc │ │ ├── PlainLayout.hh │ │ ├── RaidDpLayout.cc │ │ ├── RaidDpLayout.hh │ │ ├── RainBlock.cc │ │ ├── RainBlock.hh │ │ ├── RainGroup.cc │ │ ├── RainGroup.hh │ │ ├── RainMetaLayout.cc │ │ ├── RainMetaLayout.hh │ │ ├── ReedSLayout.cc │ │ ├── ReedSLayout.hh │ │ ├── ReplicaParLayout.cc │ │ ├── ReplicaParLayout.hh │ │ ├── gf-complete/ │ │ │ ├── .gitignore │ │ │ ├── AUTHORS │ │ │ ├── COPYING │ │ │ ├── ChangeLog │ │ │ ├── License.txt │ │ │ ├── Makefile.am │ │ │ ├── NEWS │ │ │ ├── README │ │ │ ├── README.txt │ │ │ ├── autogen.sh │ │ │ ├── configure.ac │ │ │ ├── examples/ │ │ │ │ ├── Makefile.am │ │ │ │ ├── gf_example_1.c │ │ │ │ ├── gf_example_2.c │ │ │ │ ├── gf_example_3.c │ │ │ │ ├── gf_example_4.c │ │ │ │ ├── gf_example_5.c │ │ │ │ ├── gf_example_6.c │ │ │ │ └── gf_example_7.c │ │ │ ├── include/ │ │ │ │ ├── gf_complete.h │ │ │ │ ├── gf_general.h │ │ │ │ ├── gf_int.h │ │ │ │ ├── gf_method.h │ │ │ │ ├── gf_rand.h │ │ │ │ ├── gf_w16.h │ │ │ │ ├── gf_w32.h │ │ │ │ ├── gf_w4.h │ │ │ │ ├── gf_w64.h │ │ │ │ └── gf_w8.h │ │ │ ├── m4/ │ │ │ │ ├── ax_check_compile_flag.m4 │ │ │ │ ├── ax_ext.m4 │ │ │ │ ├── ax_gcc_x86_avx_xgetbv.m4 │ │ │ │ ├── ax_gcc_x86_cpuid.m4 │ │ │ │ ├── ltoptions.m4 │ │ │ │ ├── ltsugar.m4 │ │ │ │ └── lt~obsolete.m4 │ 
│ │ ├── manual/ │ │ │ │ ├── gf-complete.html │ │ │ │ └── style.css │ │ │ ├── src/ │ │ │ │ ├── Makefile.am │ │ │ │ ├── gf.c │ │ │ │ ├── gf_general.c │ │ │ │ ├── gf_method.c │ │ │ │ ├── gf_rand.c │ │ │ │ ├── gf_w128.c │ │ │ │ ├── gf_w16.c │ │ │ │ ├── gf_w32.c │ │ │ │ ├── gf_w4.c │ │ │ │ ├── gf_w64.c │ │ │ │ ├── gf_w8.c │ │ │ │ ├── gf_wgen.c │ │ │ │ └── neon/ │ │ │ │ ├── gf_w16_neon.c │ │ │ │ ├── gf_w32_neon.c │ │ │ │ ├── gf_w4_neon.c │ │ │ │ ├── gf_w64_neon.c │ │ │ │ └── gf_w8_neon.c │ │ │ ├── test/ │ │ │ │ ├── Makefile.am │ │ │ │ └── gf_unit.c │ │ │ └── tools/ │ │ │ ├── Makefile.am │ │ │ ├── gf_add.c │ │ │ ├── gf_div.c │ │ │ ├── gf_inline_time.c │ │ │ ├── gf_methods.c │ │ │ ├── gf_mult.c │ │ │ ├── gf_poly.c │ │ │ ├── gf_time.c │ │ │ └── time_tool.sh │ │ └── jerasure/ │ │ ├── .gitattributes │ │ ├── .gitignore │ │ ├── AUTHORS │ │ ├── COPYING │ │ ├── ChangeLog │ │ ├── Examples/ │ │ │ ├── .gitignore │ │ │ ├── Makefile.am │ │ │ ├── cauchy_01.c │ │ │ ├── cauchy_02.c │ │ │ ├── cauchy_03.c │ │ │ ├── cauchy_04.c │ │ │ ├── decoder.c │ │ │ ├── encode_decode.sh │ │ │ ├── encoder.c │ │ │ ├── jerasure_01.c │ │ │ ├── jerasure_02.c │ │ │ ├── jerasure_03.c │ │ │ ├── jerasure_04.c │ │ │ ├── jerasure_05.c │ │ │ ├── jerasure_06.c │ │ │ ├── jerasure_07.c │ │ │ ├── jerasure_08.c │ │ │ ├── liberation_01.c │ │ │ ├── reed_sol_01.c │ │ │ ├── reed_sol_02.c │ │ │ ├── reed_sol_03.c │ │ │ ├── reed_sol_04.c │ │ │ ├── reed_sol_test_gf.c │ │ │ ├── reed_sol_time_gf.c │ │ │ ├── test_all_gfs.sh │ │ │ ├── test_galois.c │ │ │ └── time_all_gfs_argv_init.sh │ │ ├── License.txt │ │ ├── Makefile.am │ │ ├── NEWS │ │ ├── PERF.txt │ │ ├── README │ │ ├── configure.ac │ │ ├── include/ │ │ │ ├── cauchy.h │ │ │ ├── galois.h │ │ │ ├── jerasure.h │ │ │ ├── liberation.h │ │ │ ├── reed_sol.h │ │ │ └── timing.h │ │ ├── m4/ │ │ │ ├── ax_check_compile_flag.m4 │ │ │ ├── ax_ext.m4 │ │ │ ├── ax_gcc_x86_avx_xgetbv.m4 │ │ │ ├── ax_gcc_x86_cpuid.m4 │ │ │ └── ax_require_defined.m4 │ │ └── src/ │ │ ├── Makefile.am │ │ ├── 
cauchy.c │ │ ├── cauchy_best_r6.c │ │ ├── galois.c │ │ ├── jerasure.c │ │ ├── liberation.c │ │ ├── reed_sol.c │ │ └── timing.c │ ├── storage/ │ │ ├── Communicator.cc │ │ ├── ErrorReport.cc │ │ ├── FileSystem.cc │ │ ├── FileSystem.hh │ │ ├── MgmSyncer.cc │ │ ├── MonitorVarPartition.hh │ │ ├── Publish.cc │ │ ├── Remover.cc │ │ ├── Report.cc │ │ ├── Scrub.cc │ │ ├── Storage.cc │ │ ├── Storage.hh │ │ ├── Supervisor.cc │ │ ├── TrafficShaping.cc │ │ ├── TrafficShaping.hh │ │ └── Verify.cc │ ├── tools/ │ │ ├── Adler32.cc │ │ ├── CheckBlockXS.cc │ │ ├── CheckSum.cc │ │ ├── ComputeBlockXS.cc │ │ ├── ConvertFileMD.cc │ │ ├── IoPing.c │ │ ├── RainCheck.cc │ │ ├── RainHdrDump.cc │ │ ├── RecoverRaidDP.cc │ │ ├── ScanXS.cc │ │ ├── eosfstinfo │ │ └── eosfstregister │ ├── utils/ │ │ ├── CheckFileReadWithPattern.cc │ │ ├── CreateFileWithPattern.cc │ │ ├── DiskMeasurements.cc │ │ ├── DiskMeasurements.hh │ │ ├── DiskMeasurementsMain.cc │ │ ├── FSPathHandler.cc │ │ ├── FSPathHandler.hh │ │ ├── FTSWalkTree.hh │ │ ├── IoPriority.cc │ │ ├── IoPriority.hh │ │ ├── OpenFileTracker.cc │ │ ├── OpenFileTracker.hh │ │ ├── ScanRate.cc │ │ ├── ScanRate.hh │ │ ├── StdFSWalkTree.hh │ │ ├── TpcInfo.hh │ │ ├── TransformAttr.hh │ │ ├── XrdOfsPathHandler.cc │ │ └── XrdOfsPathHandler.hh │ └── xrdcl_plugins/ │ ├── CMakeLists.txt │ ├── RainFile.cc │ ├── RainFile.hh │ ├── RainPlugin.cc │ └── RainPlugin.hh ├── fusex/ │ ├── CMakeLists.txt │ ├── README.md │ ├── auth/ │ │ ├── AuthenticationGroup.cc │ │ ├── AuthenticationGroup.hh │ │ ├── BoundIdentityProvider.cc │ │ ├── BoundIdentityProvider.hh │ │ ├── CMakeLists.txt │ │ ├── CredentialCache.hh │ │ ├── CredentialFinder.cc │ │ ├── CredentialFinder.hh │ │ ├── CredentialValidator.cc │ │ ├── CredentialValidator.hh │ │ ├── DirectoryIterator.cc │ │ ├── DirectoryIterator.hh │ │ ├── EnvironmentReader.cc │ │ ├── EnvironmentReader.hh │ │ ├── FileDescriptor.hh │ │ ├── JailIdentifier.cc │ │ ├── JailIdentifier.hh │ │ ├── Logbook.cc │ │ ├── Logbook.hh │ │ ├── 
LoginIdentifier.cc │ │ ├── LoginIdentifier.hh │ │ ├── ProcessCache.cc │ │ ├── ProcessCache.hh │ │ ├── ProcessInfo.cc │ │ ├── ProcessInfo.hh │ │ ├── README.md │ │ ├── RmInfo.cc │ │ ├── RmInfo.hh │ │ ├── ScopedEUidSetter.hh │ │ ├── ScopedFsUidSetter.hh │ │ ├── SecurityChecker.cc │ │ ├── SecurityChecker.hh │ │ ├── UnixAuthenticator.cc │ │ ├── UnixAuthenticator.hh │ │ ├── UserCredentialFactory.cc │ │ ├── UserCredentialFactory.hh │ │ ├── UserCredentials.hh │ │ ├── Utils.cc │ │ ├── Utils.hh │ │ ├── UuidStore.cc │ │ └── UuidStore.hh │ ├── backend/ │ │ ├── backend.cc │ │ └── backend.hh │ ├── benchmark/ │ │ ├── CMakeLists.txt │ │ ├── eos-fusex-certify │ │ └── fusex-benchmark.cc │ ├── cap/ │ │ ├── cap.cc │ │ └── cap.hh │ ├── data/ │ │ ├── bufferll.hh │ │ ├── cache.cc │ │ ├── cache.hh │ │ ├── cacheconfig.hh │ │ ├── cachehandler.hh │ │ ├── cachelock.hh │ │ ├── cachesyncer.cc │ │ ├── cachesyncer.hh │ │ ├── data.cc │ │ ├── data.hh │ │ ├── dircleaner.cc │ │ ├── dircleaner.hh │ │ ├── diskcache.cc │ │ ├── diskcache.hh │ │ ├── interval_tree.hh │ │ ├── io.hh │ │ ├── journalcache.cc │ │ ├── journalcache.hh │ │ ├── memorycache.cc │ │ ├── memorycache.hh │ │ ├── rbtree.hh │ │ ├── xrdclproxy.cc │ │ └── xrdclproxy.hh │ ├── eoscfsd/ │ │ ├── README.md │ │ ├── cfs.sh │ │ ├── cfskey.hh │ │ ├── cfslogin.cc │ │ ├── cfslogin.hh │ │ ├── cfsmapping.hh │ │ ├── cfsquota.hh │ │ ├── cfsrecycle.cc │ │ ├── cfsrecycle.hh │ │ ├── cfsutil.hh │ │ ├── cfsvattr.hh │ │ ├── eoscfsd.cc │ │ ├── eoscfsd.hh │ │ ├── keychange.hh │ │ ├── obfuscate.hh │ │ └── overlay.hh │ ├── eosfusebind │ ├── eosxd/ │ │ ├── eosfuse.cc │ │ ├── eosfuse.hh │ │ ├── llfusexx.hh │ │ └── main.cc │ ├── fuse.conf.example │ ├── fuse.example.stats.json │ ├── fusex.proto │ ├── kv/ │ │ ├── NoKV.cc │ │ ├── NoKV.hh │ │ ├── RocksKV.cc │ │ ├── RocksKV.hh │ │ └── kv.hh │ ├── md/ │ │ ├── kernelcache.hh │ │ ├── md.cc │ │ └── md.hh │ ├── misc/ │ │ ├── ConcurrentMount.cc │ │ ├── ConcurrentMount.hh │ │ ├── FuseException.hh │ │ ├── FuseId.hh │ │ ├── 
MacOSXHelper.hh │ │ ├── RunningPidScanner.cc │ │ ├── RunningPidScanner.hh │ │ ├── SyncQueue.hh │ │ ├── ThreadPool.hh │ │ ├── Track.hh │ │ ├── filename.hh │ │ ├── fusexrdlogin.cc │ │ ├── fusexrdlogin.hh │ │ ├── longstring.cc │ │ ├── longstring.hh │ │ ├── richacl.hh │ │ └── stringTS.hh │ ├── stat/ │ │ ├── Stat.cc │ │ └── Stat.hh │ ├── submount/ │ │ ├── SubMount.cc │ │ └── SubMount.hh │ ├── tests/ │ │ ├── CMakeLists.txt │ │ ├── auth/ │ │ │ ├── credential-finder.cc │ │ │ ├── environment-reader.cc │ │ │ ├── logbook.cc │ │ │ ├── login-identifier.cc │ │ │ ├── process-cache.cc │ │ │ ├── process-info.cc │ │ │ ├── rm-info.cc │ │ │ ├── security-checker.cc │ │ │ ├── test-utils.cc │ │ │ ├── test-utils.hh │ │ │ └── utils.cc │ │ ├── eos-fusex-git-annex │ │ ├── eos-fusex-recovery │ │ ├── eos-test-fusex-messaging │ │ ├── eos-test-fusex-producer-consumer │ │ ├── interval-tree.cc │ │ ├── ioverify.cc │ │ ├── journal-cache.cc │ │ ├── lru-test.cc │ │ ├── rb-tree.cc │ │ ├── rocks-kv.cc │ │ └── stress/ │ │ └── xrdcl-proxy.cc │ └── tsan/ │ └── suppressions.tsan ├── genversion.sh ├── git/ │ └── bin/ │ └── enable-hooks.sh ├── gitlab-ci/ │ ├── .gitlab-ci-build-macos.yml │ ├── .gitlab-ci-build-ubuntu.yml │ ├── .gitlab-ci-test-dock_include.yml │ ├── .gitlab-ci-test-helm-server-multigroup-values.yml │ ├── .gitlab-ci-test-helm_fusex_values.yml │ ├── .gitlab-ci-test-helm_include.yml │ ├── .gitlab-ci-test-helm_kuberos_values.yml │ ├── .gitlab-ci-test-helm_server_values.yml │ ├── .gitlab-ci-test-k8s_include.yml │ ├── after_script_docker_test.sh │ ├── after_script_k8s_test.sh │ ├── before_script_docker_test.sh │ ├── before_script_k8s_test.sh │ ├── export_codename.sh │ ├── export_commit-type.sh │ ├── generate_debian_metadata.sh │ ├── prebuild_OSbase/ │ │ ├── prebuild-cc7.Dockerfile │ │ ├── prebuild-cc7_exotic.Dockerfile │ │ ├── prebuild-el10.Dockerfile │ │ ├── prebuild-el8.Dockerfile │ │ ├── prebuild-el9-arm64.Dockerfile │ │ └── prebuild-el9.Dockerfile │ ├── publish_deb.sh │ ├── 
remove_old_artifacts.sh │ ├── remove_old_artifacts_debian.sh │ ├── setup_ccache.sh │ ├── setup_ccache_deb.sh │ ├── setup_ccache_fc.sh │ ├── sign_debian_repository.sh │ ├── store_artifacts.sh │ ├── store_artifacts_debian.sh │ ├── store_stable_artifacts.sh │ └── utilities_func_for_tests.sh ├── icons/ │ └── EOS.icns ├── man/ │ ├── CMakeLists.txt │ ├── README.md │ ├── create_eos_cmds.pl │ └── create_man.sh ├── mgm/ │ ├── #Iostat.cc# │ ├── CMakeLists.txt │ ├── CtaUtils.cc │ ├── CtaUtils.hh │ ├── EosCtaReporter.cc │ ├── EosCtaReporter.hh │ ├── FuseServer/ │ │ ├── Caps.cc │ │ ├── Caps.hh │ │ ├── Clients.cc │ │ ├── Clients.hh │ │ ├── Flush.cc │ │ ├── Flush.hh │ │ ├── FusexCastBatch.hh │ │ ├── Locks.cc │ │ ├── Locks.hh │ │ ├── Namespace.hh │ │ ├── Server.cc │ │ └── Server.hh │ ├── Namespace.hh │ ├── README.md │ ├── access/ │ │ ├── Access.cc │ │ └── Access.hh │ ├── acl/ │ │ ├── Acl.cc │ │ ├── Acl.hh │ │ └── README.md │ ├── adminsocket/ │ │ ├── AdminSocket.cc │ │ └── AdminSocket.hh │ ├── auth/ │ │ ├── AccessChecker.cc │ │ └── AccessChecker.hh │ ├── authz/ │ │ ├── XrdMgmAuthz.cc │ │ └── XrdMgmAuthz.hh │ ├── balancer/ │ │ ├── FsBalancer.cc │ │ ├── FsBalancer.hh │ │ ├── FsBalancerStats.cc │ │ └── FsBalancerStats.hh │ ├── bulk-request/ │ │ ├── BulkRequest.cc │ │ ├── BulkRequest.hh │ │ ├── BulkRequestFactory.cc │ │ ├── BulkRequestFactory.hh │ │ ├── BulkRequestHelper.hh │ │ ├── File.hh │ │ ├── FileCollection.hh │ │ ├── README.md │ │ ├── business/ │ │ │ ├── BulkRequestBusiness.cc │ │ │ └── BulkRequestBusiness.hh │ │ ├── dao/ │ │ │ ├── IBulkRequestDAO.hh │ │ │ ├── factories/ │ │ │ │ ├── AbstractDAOFactory.hh │ │ │ │ ├── ProcDirectoryDAOFactory.cc │ │ │ │ └── ProcDirectoryDAOFactory.hh │ │ │ └── proc/ │ │ │ ├── ProcDirBulkRequestFile.cc │ │ │ ├── ProcDirBulkRequestFile.hh │ │ │ ├── ProcDirectoryBulkRequestDAO.cc │ │ │ ├── ProcDirectoryBulkRequestDAO.hh │ │ │ ├── ProcDirectoryBulkRequestLocations.cc │ │ │ ├── ProcDirectoryBulkRequestLocations.hh │ │ │ └── cleaner/ │ │ │ ├── 
BulkRequestProcCleaner.cc │ │ │ ├── BulkRequestProcCleaner.hh │ │ │ ├── BulkRequestProcCleanerConfig.cc │ │ │ └── BulkRequestProcCleanerConfig.hh │ │ ├── exception/ │ │ │ ├── BulkRequestException.hh │ │ │ └── PersistencyException.hh │ │ ├── interface/ │ │ │ ├── IMgmFileSystemInterface.hh │ │ │ ├── RealMgmFileSystemInterface.cc │ │ │ └── RealMgmFileSystemInterface.hh │ │ ├── prepare/ │ │ │ ├── CancellationBulkRequest.hh │ │ │ ├── EvictBulkRequest.hh │ │ │ ├── PrepareUtils.cc │ │ │ ├── PrepareUtils.hh │ │ │ ├── StageBulkRequest.hh │ │ │ ├── manager/ │ │ │ │ ├── BulkRequestPrepareManager.cc │ │ │ │ ├── BulkRequestPrepareManager.hh │ │ │ │ ├── PrepareManager.cc │ │ │ │ └── PrepareManager.hh │ │ │ └── query-prepare/ │ │ │ ├── QueryPrepareResult.cc │ │ │ └── QueryPrepareResult.hh │ │ ├── response/ │ │ │ └── QueryPrepareResponse.hh │ │ └── utils/ │ │ ├── PrepareArgumentsWrapper.hh │ │ └── json/ │ │ └── QueryPrepareResponseJson.hh │ ├── commandmap/ │ │ ├── CommandMap.cc │ │ └── CommandMap.hh │ ├── config/ │ │ ├── IConfigEngine.cc │ │ ├── IConfigEngine.hh │ │ ├── QuarkConfigHandler.cc │ │ ├── QuarkConfigHandler.hh │ │ ├── QuarkDBConfigEngine.cc │ │ ├── QuarkDBConfigEngine.hh │ │ └── eos-config-inspect.cc │ ├── convert/ │ │ ├── ConversionInfo.cc │ │ ├── ConversionInfo.hh │ │ ├── ConversionJob.cc │ │ ├── ConversionJob.hh │ │ ├── ConversionTag.hh │ │ ├── ConverterEngine.cc │ │ └── ConverterEngine.hh │ ├── devices/ │ │ ├── Devices.cc │ │ └── Devices.hh │ ├── drain/ │ │ ├── DrainFs.cc │ │ ├── DrainFs.hh │ │ ├── DrainTransferJob.cc │ │ ├── DrainTransferJob.hh │ │ ├── Drainer.cc │ │ └── Drainer.hh │ ├── egroup/ │ │ ├── Egroup.cc │ │ └── Egroup.hh │ ├── eos-repair-tool │ ├── features/ │ │ ├── Features.cc │ │ └── Features.hh │ ├── filesystem/ │ │ ├── FileSystem.cc │ │ └── FileSystem.hh │ ├── fsck/ │ │ ├── Fsck.cc │ │ ├── Fsck.hh │ │ ├── FsckEntry.cc │ │ └── FsckEntry.hh │ ├── fsview/ │ │ ├── FsView.cc │ │ └── FsView.hh │ ├── fuse-locks/ │ │ ├── LockTracker.cc │ │ └── LockTracker.hh 
│ ├── geobalancer/ │ │ ├── GeoBalancer.cc │ │ └── GeoBalancer.hh │ ├── geotree/ │ │ ├── SchedulingFastTree.hh │ │ ├── SchedulingSlowTree.cc │ │ ├── SchedulingSlowTree.hh │ │ ├── SchedulingTreeCommon.cc │ │ ├── SchedulingTreeCommon.hh │ │ ├── SchedulingTreeTest.cc │ │ └── SchedulingTreeTest.cc.testfile │ ├── geotreeengine/ │ │ ├── GeoTreeEngine.cc │ │ └── GeoTreeEngine.hh │ ├── groupbalancer/ │ │ ├── BalancerEngine.cc │ │ ├── BalancerEngine.hh │ │ ├── BalancerEngineFactory.hh │ │ ├── BalancerEngineTypes.hh │ │ ├── BalancerEngineUtils.hh │ │ ├── ConverterUtils.cc │ │ ├── ConverterUtils.hh │ │ ├── FreeSpaceBalancerEngine.cc │ │ ├── FreeSpaceBalancerEngine.hh │ │ ├── GroupBalancer.cc │ │ ├── GroupBalancer.hh │ │ ├── GroupsInfoFetcher.cc │ │ ├── GroupsInfoFetcher.hh │ │ ├── MinMaxBalancerEngine.cc │ │ ├── MinMaxBalancerEngine.hh │ │ ├── StdDevBalancerEngine.cc │ │ ├── StdDevBalancerEngine.hh │ │ ├── StdDrainerEngine.cc │ │ └── StdDrainerEngine.hh │ ├── groupdrainer/ │ │ ├── DrainProgressTracker.cc │ │ ├── DrainProgressTracker.hh │ │ ├── GroupDrainer.cc │ │ ├── GroupDrainer.hh │ │ └── RetryTracker.hh │ ├── grpc/ │ │ ├── GrpcNsInterface.cc │ │ ├── GrpcNsInterface.hh │ │ ├── GrpcRestGwInterface.cc │ │ ├── GrpcRestGwInterface.hh │ │ ├── GrpcRestGwServer.cc │ │ ├── GrpcRestGwServer.hh │ │ ├── GrpcServer.cc │ │ ├── GrpcServer.hh │ │ ├── GrpcWncInterface.cc │ │ ├── GrpcWncInterface.hh │ │ ├── GrpcWncServer.cc │ │ └── GrpcWncServer.hh │ ├── http/ │ │ ├── HttpHandler.cc │ │ ├── HttpHandler.hh │ │ ├── HttpServer.cc │ │ ├── HttpServer.hh │ │ ├── ProtocolHandlerFactory.hh │ │ ├── rapidxml/ │ │ │ ├── license.txt │ │ │ ├── rapidxml.hpp │ │ │ ├── rapidxml_print.hpp │ │ │ └── rapidxml_utils.hpp │ │ ├── rest-api/ │ │ │ ├── Constants.hh │ │ │ ├── README.md │ │ │ ├── action/ │ │ │ │ ├── Action.hh │ │ │ │ └── tape/ │ │ │ │ ├── TapeAction.hh │ │ │ │ ├── TapeActions.hh │ │ │ │ ├── archiveinfo/ │ │ │ │ │ ├── GetArchiveInfo.cc │ │ │ │ │ └── GetArchiveInfo.hh │ │ │ │ ├── release/ │ │ │ │ │ ├── 
CreateReleaseBulkRequest.cc │ │ │ │ │ └── CreateReleaseBulkRequest.hh │ │ │ │ └── stage/ │ │ │ │ ├── CancelStageBulkRequest.cc │ │ │ │ ├── CancelStageBulkRequest.hh │ │ │ │ ├── CreateStageBulkRequest.cc │ │ │ │ ├── CreateStageBulkRequest.hh │ │ │ │ ├── DeleteStageBulkRequest.cc │ │ │ │ ├── DeleteStageBulkRequest.hh │ │ │ │ ├── GetStageBulkRequest.cc │ │ │ │ └── GetStageBulkRequest.hh │ │ │ ├── business/ │ │ │ │ └── tape/ │ │ │ │ ├── ITapeRestApiBusiness.hh │ │ │ │ ├── TapeRestApiBusiness.cc │ │ │ │ └── TapeRestApiBusiness.hh │ │ │ ├── config/ │ │ │ │ └── tape/ │ │ │ │ ├── TapeRestApiConfig.cc │ │ │ │ └── TapeRestApiConfig.hh │ │ │ ├── exception/ │ │ │ │ ├── Exceptions.hh │ │ │ │ ├── JsonValidationException.hh │ │ │ │ └── RestException.hh │ │ │ ├── handler/ │ │ │ │ ├── RestHandler.cc │ │ │ │ ├── RestHandler.hh │ │ │ │ ├── tape/ │ │ │ │ │ ├── TapeRestHandler.cc │ │ │ │ │ └── TapeRestHandler.hh │ │ │ │ └── wellknown/ │ │ │ │ ├── WellKnownHandler.cc │ │ │ │ └── WellKnownHandler.hh │ │ │ ├── json/ │ │ │ │ ├── builder/ │ │ │ │ │ ├── JsonModelBuilder.hh │ │ │ │ │ ├── ValidationError.hh │ │ │ │ │ └── jsoncpp/ │ │ │ │ │ ├── JsonCppModelBuilder.hh │ │ │ │ │ └── JsonCppValidator.hh │ │ │ │ └── tape/ │ │ │ │ ├── TapeJsonifiers.hh │ │ │ │ ├── TapeModelBuilders.hh │ │ │ │ ├── TapeRestApiJsonifier.hh │ │ │ │ └── model-builders/ │ │ │ │ └── validators/ │ │ │ │ └── TapeJsonCppValidator.hh │ │ │ ├── manager/ │ │ │ │ ├── RestApiManager.cc │ │ │ │ └── RestApiManager.hh │ │ │ ├── model/ │ │ │ │ ├── tape/ │ │ │ │ │ ├── archiveinfo/ │ │ │ │ │ │ └── GetArchiveInfoResponseModel.hh │ │ │ │ │ ├── common/ │ │ │ │ │ │ ├── ErrorModel.cc │ │ │ │ │ │ ├── ErrorModel.hh │ │ │ │ │ │ └── FilesContainer.hh │ │ │ │ │ └── stage/ │ │ │ │ │ ├── CreateStageBulkRequestModel.hh │ │ │ │ │ ├── CreatedStageBulkRequestResponseModel.hh │ │ │ │ │ ├── GetStageBulkRequestResponseModel.hh │ │ │ │ │ └── PathsModel.hh │ │ │ │ └── wellknown/ │ │ │ │ └── tape/ │ │ │ │ └── GetTapeWellKnownModel.hh │ │ │ ├── response/ │ │ 
│ │ ├── ErrorHandling.hh │ │ │ │ ├── RestApiResponse.hh │ │ │ │ ├── RestApiResponseFactory.hh │ │ │ │ ├── RestResponseFactory.cc │ │ │ │ └── RestResponseFactory.hh │ │ │ ├── router/ │ │ │ │ └── Router.hh │ │ │ ├── utils/ │ │ │ │ ├── URLBuilder.cc │ │ │ │ ├── URLBuilder.hh │ │ │ │ ├── URLParser.cc │ │ │ │ └── URLParser.hh │ │ │ └── wellknown/ │ │ │ └── tape/ │ │ │ ├── TapeRestApiEndpoint.cc │ │ │ ├── TapeRestApiEndpoint.hh │ │ │ ├── TapeWellKnownInfos.cc │ │ │ └── TapeWellKnownInfos.hh │ │ ├── s3/ │ │ │ ├── S3Handler.cc │ │ │ ├── S3Handler.hh │ │ │ ├── S3Store.cc │ │ │ └── S3Store.hh │ │ ├── webdav/ │ │ │ ├── LockResponse.cc │ │ │ ├── LockResponse.hh │ │ │ ├── PropFindResponse.cc │ │ │ ├── PropFindResponse.hh │ │ │ ├── PropPatchResponse.cc │ │ │ ├── PropPatchResponse.hh │ │ │ ├── WebDAVHandler.cc │ │ │ ├── WebDAVHandler.hh │ │ │ ├── WebDAVResponse.cc │ │ │ └── WebDAVResponse.hh │ │ └── xrdhttp/ │ │ ├── EosMgmHttpHandler.cc │ │ ├── EosMgmHttpHandler.hh │ │ └── README.md │ ├── imaster/ │ │ ├── IMaster.cc │ │ └── IMaster.hh │ ├── inflighttracker/ │ │ ├── InFlightTracker.cc │ │ └── InFlightTracker.hh │ ├── inspector/ │ │ ├── FileInspector.cc │ │ ├── FileInspector.hh │ │ ├── FileInspectorStats.cc │ │ └── FileInspectorStats.hh │ ├── iostat/ │ │ ├── Iostat.cc │ │ └── Iostat.hh │ ├── lru/ │ │ ├── LRU.cc │ │ └── LRU.hh │ ├── macros/ │ │ ├── Macros.cc │ │ └── Macros.hh │ ├── misc/ │ │ ├── AuditHelpers.hh │ │ ├── Constants.hh │ │ └── IdTrackerWithValidity.hh │ ├── namespacestats/ │ │ ├── NamespaceStats.cc │ │ └── NamespaceStats.hh │ ├── ofs/ │ │ ├── XrdMgmOfs.cc │ │ ├── XrdMgmOfs.hh │ │ ├── XrdMgmOfsConfigure.cc │ │ ├── XrdMgmOfsDirectory.cc │ │ ├── XrdMgmOfsDirectory.hh │ │ ├── XrdMgmOfsFile.cc │ │ ├── XrdMgmOfsFile.hh │ │ ├── XrdMgmOfsSecurity.hh │ │ ├── XrdMgmOfsTrace.hh │ │ ├── cmds/ │ │ │ ├── Access.inc │ │ │ ├── Attr.inc │ │ │ ├── Auth.inc │ │ │ ├── Chksum.inc │ │ │ ├── Chmod.inc │ │ │ ├── Chown.inc │ │ │ ├── Coverage.inc │ │ │ ├── DeleteExternal.inc │ │ │ ├── 
DropReplica.inc │ │ │ ├── ErrorLogListener.inc │ │ │ ├── Exists.inc │ │ │ ├── FAttr.inc │ │ │ ├── Find.inc │ │ │ ├── FsConfigListener.inc │ │ │ ├── Fsctl.inc │ │ │ ├── Link.inc │ │ │ ├── Mkdir.inc │ │ │ ├── PathMap.inc │ │ │ ├── Remdir.inc │ │ │ ├── Rename.inc │ │ │ ├── Rm.inc │ │ │ ├── SharedPath.inc │ │ │ ├── ShouldRedirect.inc │ │ │ ├── ShouldRoute.inc │ │ │ ├── ShouldStall.inc │ │ │ ├── Shutdown.inc │ │ │ ├── Stacktrace.inc │ │ │ ├── Stat.inc │ │ │ ├── Stripes.inc │ │ │ ├── Touch.inc │ │ │ ├── Utimes.inc │ │ │ └── Version.inc │ │ └── fsctl/ │ │ ├── Access.cc │ │ ├── AdjustReplica.cc │ │ ├── Checksum.cc │ │ ├── Chmod.cc │ │ ├── Chown.cc │ │ ├── Commit.cc │ │ ├── CommitHelper.cc │ │ ├── CommitHelper.hh │ │ ├── Drop.cc │ │ ├── Event.cc │ │ ├── Fusex.cc │ │ ├── GetFusex.cc │ │ ├── Getfmd.cc │ │ ├── Mkdir.cc │ │ ├── Open.cc │ │ ├── Readlink.cc │ │ ├── Redirect.cc │ │ ├── Stat.cc │ │ ├── Statvfs.cc │ │ ├── Symlink.cc │ │ ├── Utimes.cc │ │ └── Version.cc │ ├── pathrouting/ │ │ ├── PathRouting.cc │ │ └── PathRouting.hh │ ├── placement/ │ │ ├── ClusterDataTypes.hh │ │ ├── ClusterMap.cc │ │ ├── ClusterMap.hh │ │ ├── FlatScheduler.cc │ │ ├── FlatScheduler.hh │ │ ├── FsScheduler.cc │ │ ├── FsScheduler.hh │ │ ├── PlacementStrategy.cc │ │ ├── PlacementStrategy.hh │ │ ├── RRSeed.hh │ │ ├── RoundRobinPlacementStrategy.cc │ │ ├── RoundRobinPlacementStrategy.hh │ │ ├── ThreadLocalRRSeed.cc │ │ ├── ThreadLocalRRSeed.hh │ │ ├── WeightedRandomStrategy.cc │ │ ├── WeightedRandomStrategy.hh │ │ ├── WeightedRoundRobinStrategy.cc │ │ └── WeightedRoundRobinStrategy.hh │ ├── policy/ │ │ ├── Policy.cc │ │ └── Policy.hh │ ├── proc/ │ │ ├── IProcCommand.cc │ │ ├── IProcCommand.hh │ │ ├── ProcCommand.cc │ │ ├── ProcCommand.hh │ │ ├── ProcInterface.cc │ │ ├── ProcInterface.hh │ │ ├── admin/ │ │ │ ├── Access.cc │ │ │ ├── AccessCmd.cc │ │ │ ├── AccessCmd.hh │ │ │ ├── Backup.cc │ │ │ ├── Backup.hh │ │ │ ├── ConfigCmd.cc │ │ │ ├── ConfigCmd.hh │ │ │ ├── ConvertCmd.cc │ │ │ ├── ConvertCmd.hh │ │ │ 
├── DebugCmd.cc │ │ │ ├── DebugCmd.hh │ │ │ ├── DevicesCmd.cc │ │ │ ├── DevicesCmd.hh │ │ │ ├── EvictCmd.cc │ │ │ ├── EvictCmd.hh │ │ │ ├── FileRegisterCmd.cc │ │ │ ├── FileRegisterCmd.hh │ │ │ ├── FsCmd.cc │ │ │ ├── FsCmd.hh │ │ │ ├── FsckCmd.cc │ │ │ ├── FsckCmd.hh │ │ │ ├── Fusex.cc │ │ │ ├── GeoSched.cc │ │ │ ├── GroupCmd.cc │ │ │ ├── GroupCmd.hh │ │ │ ├── IoCmd.cc │ │ │ ├── IoCmd.hh │ │ │ ├── IoShapingCmd.cc │ │ │ ├── NodeCmd.cc │ │ │ ├── NodeCmd.hh │ │ │ ├── NsCmd.cc │ │ │ ├── NsCmd.hh │ │ │ ├── Quota.cc │ │ │ ├── QuotaCmd.cc │ │ │ ├── QuotaCmd.hh │ │ │ ├── Rtlog.cc │ │ │ ├── SchedCmd.cc │ │ │ ├── SchedCmd.hh │ │ │ ├── SpaceCmd.cc │ │ │ ├── SpaceCmd.hh │ │ │ └── Vid.cc │ │ ├── proc_fs.cc │ │ ├── proc_fs.hh │ │ └── user/ │ │ ├── Accounting.cc │ │ ├── AclCmd.cc │ │ ├── AclCmd.hh │ │ ├── Archive.cc │ │ ├── Attr.cc │ │ ├── Cd.cc │ │ ├── Chmod.cc │ │ ├── Chown.cc │ │ ├── DfCmd.cc │ │ ├── DfCmd.hh │ │ ├── File.cc │ │ ├── Fileinfo.cc │ │ ├── Find.cc │ │ ├── Fuse.cc │ │ ├── FuseX.cc │ │ ├── Ls.cc │ │ ├── Map.cc │ │ ├── Member.cc │ │ ├── Mkdir.cc │ │ ├── Motd.cc │ │ ├── NewfindCmd.cc │ │ ├── NewfindCmd.hh │ │ ├── Quota.cc │ │ ├── RecycleCmd.cc │ │ ├── RecycleCmd.hh │ │ ├── Rm.cc │ │ ├── RmCmd.cc │ │ ├── RmCmd.hh │ │ ├── Rmdir.cc │ │ ├── RouteCmd.cc │ │ ├── RouteCmd.hh │ │ ├── TokenCmd.cc │ │ ├── TokenCmd.hh │ │ ├── Version.cc │ │ ├── Who.cc │ │ └── Whoami.cc │ ├── qdbmaster/ │ │ ├── QdbMaster.cc │ │ └── QdbMaster.hh │ ├── quota/ │ │ ├── #Quota.cc# │ │ ├── Quota.cc │ │ └── Quota.hh │ ├── recycle/ │ │ ├── Recycle.cc │ │ ├── Recycle.hh │ │ ├── RecycleEntry.cc │ │ ├── RecycleEntry.hh │ │ ├── RecyclePolicy.cc │ │ └── RecyclePolicy.hh │ ├── routeendpoint/ │ │ ├── RouteEndpoint.cc │ │ └── RouteEndpoint.hh │ ├── scheduler/ │ │ ├── Scheduler.cc │ │ └── Scheduler.hh │ ├── shaping/ │ │ ├── TrafficShaping.cc │ │ └── TrafficShaping.hh │ ├── stat/ │ │ ├── Stat.cc │ │ └── Stat.hh │ ├── tgc/ │ │ ├── AsyncResult.hh │ │ ├── AsyncUint64ShellCmd.cc │ │ ├── AsyncUint64ShellCmd.hh │ │ ├── 
BlockingFlag.hh │ │ ├── CachedValue.hh │ │ ├── Constants.hh │ │ ├── DummyClock.hh │ │ ├── DummyTapeGcMgm.cc │ │ ├── DummyTapeGcMgm.hh │ │ ├── FreedBytesHistogram.cc │ │ ├── FreedBytesHistogram.hh │ │ ├── IClock.cc │ │ ├── IClock.hh │ │ ├── ITapeGcMgm.cc │ │ ├── ITapeGcMgm.hh │ │ ├── Lru.cc │ │ ├── Lru.hh │ │ ├── MaxLenExceeded.cc │ │ ├── MaxLenExceeded.hh │ │ ├── MultiSpaceTapeGc.cc │ │ ├── MultiSpaceTapeGc.hh │ │ ├── RealClock.cc │ │ ├── RealClock.hh │ │ ├── RealTapeGcMgm.cc │ │ ├── RealTapeGcMgm.hh │ │ ├── SmartSpaceStats.cc │ │ ├── SmartSpaceStats.hh │ │ ├── SpaceConfig.hh │ │ ├── SpaceNotFound.cc │ │ ├── SpaceNotFound.hh │ │ ├── SpaceStats.hh │ │ ├── SpaceToTapeGcMap.cc │ │ ├── SpaceToTapeGcMap.hh │ │ ├── TapeGc.cc │ │ ├── TapeGc.hh │ │ ├── TapeGcStats.hh │ │ └── TestingTapeGc.hh │ ├── tracker/ │ │ ├── ReplicationTracker.cc │ │ └── ReplicationTracker.hh │ ├── utils/ │ │ ├── AttrHelper.cc │ │ ├── AttrHelper.hh │ │ ├── FileSystemRegistry.cc │ │ ├── FileSystemRegistry.hh │ │ ├── FileSystemStatusUtils.cc │ │ ├── FileSystemStatusUtils.hh │ │ ├── FilesystemUuidMapper.cc │ │ └── FilesystemUuidMapper.hh │ ├── vid/ │ │ ├── Vid.cc │ │ └── Vid.hh │ ├── wfe/ │ │ ├── WFE.cc │ │ └── WFE.hh │ ├── wfe.proto │ ├── workflow/ │ │ ├── Workflow.cc │ │ └── Workflow.hh │ ├── xattr/ │ │ ├── XattrLock.hh │ │ └── XattrSet.hh │ └── zmq/ │ ├── ZMQ.cc │ └── ZMQ.hh ├── misc/ │ ├── CMakeLists.txt │ ├── cmake/ │ │ ├── cmake-3.15.5-Linux-x86_64.sh │ │ └── cmake-3.19.7-Linux-x86_64.sh │ ├── egi/ │ │ ├── CMakeLists.txt │ │ ├── eos-info-provider.py │ │ └── eos-star-accounting.py │ ├── etc/ │ │ ├── CMakeLists.txt │ │ ├── auto.cfsd │ │ ├── auto.master.d/ │ │ │ └── cfsd.autofs │ │ ├── bash_completion.d/ │ │ │ └── eos │ │ ├── cron.d/ │ │ │ ├── eos-health │ │ │ ├── eos-logs │ │ │ ├── eos-mgm-monitoring │ │ │ ├── eos-reports │ │ │ └── xrd-alive │ │ ├── eos/ │ │ │ ├── cfsd/ │ │ │ │ └── eoscfsd.conf │ │ │ └── config/ │ │ │ ├── fst/ │ │ │ │ └── fst │ │ │ ├── generic/ │ │ │ │ └── all │ │ │ ├── mgm/ │ │ │ │ 
├── auth │ │ │ │ ├── mgm │ │ │ │ └── mgm.modules │ │ │ ├── modules/ │ │ │ │ └── alice │ │ │ └── qdb/ │ │ │ └── qdb │ │ ├── eos.client.keytab │ │ ├── eos.keytab │ │ ├── fuse.conf │ │ ├── fuse.conf.eos │ │ ├── logrotate.d/ │ │ │ ├── eos-fuse-logs │ │ │ ├── eos-fusex-logs │ │ │ └── eos-logs │ │ ├── profile.d/ │ │ │ └── eos-completion.sh │ │ ├── sysconfig/ │ │ │ └── eos_env.example │ │ ├── systemd/ │ │ │ └── system/ │ │ │ ├── eos.service │ │ │ ├── eos.target │ │ │ ├── eos5-fst@.service │ │ │ ├── eos5-mgm@.service │ │ │ ├── eos5-qdb@.service │ │ │ ├── eos5.service │ │ │ ├── eos@.service │ │ │ ├── eos@.socket │ │ │ ├── eos@master.service │ │ │ └── eos@slave.service │ │ ├── xrd.cf.auth │ │ ├── xrd.cf.fed │ │ ├── xrd.cf.fst │ │ ├── xrd.cf.mgm │ │ ├── xrd.cf.prefix │ │ ├── xrd.cf.quarkdb │ │ ├── xrd.cf.sync │ │ └── zsh/ │ │ └── site-functions/ │ │ └── _eos │ ├── sbin/ │ │ ├── CMakeLists.txt │ │ ├── eos-diagnostic-tool │ │ ├── eos-inspectorreport │ │ ├── eos-inspectorstat │ │ ├── eos-jwk-https │ │ ├── eos-jwker.readme │ │ ├── eos-mdreport │ │ ├── eos-mdstat │ │ ├── eos-prom-push │ │ ├── eos-reportstat │ │ ├── eos-status │ │ ├── eos_start.sh │ │ ├── eos_start_pre.sh │ │ ├── mount.eoscfs │ │ ├── mount.eosx │ │ ├── mount.eosx3 │ │ └── umount.fuse │ ├── selinux/ │ │ ├── CMakeLists.txt │ │ ├── README.md │ │ ├── choose_selinux.sh │ │ ├── eosfuse-7.pp │ │ └── eosfuse.te │ ├── usr/ │ │ ├── CMakeLists.txt │ │ ├── eos-fusex-core.conf │ │ └── eosd.conf │ └── var/ │ ├── CMakeLists.txt │ └── eos/ │ ├── test/ │ │ ├── LeakSanitizer.supp │ │ └── fuse/ │ │ └── untar/ │ │ ├── untar.tgz │ │ └── xrootd.tgz │ └── wfe/ │ └── bash/ │ └── shell ├── namespace/ │ ├── CMakeLists.txt │ ├── Constants.cc │ ├── Constants.hh │ ├── MDException.cc │ ├── MDException.hh │ ├── MDLocking.cc │ ├── MDLocking.hh │ ├── Namespace.hh │ ├── PermissionHandler.cc │ ├── PermissionHandler.hh │ ├── Prefetcher.cc │ ├── Prefetcher.hh │ ├── Resolver.cc │ ├── Resolver.hh │ ├── interface/ │ │ ├── ContainerIterators.hh │ │ ├── 
IContainerMD.hh │ │ ├── IContainerMDSvc.hh │ │ ├── IFileMD.hh │ │ ├── IFileMDSvc.hh │ │ ├── IFsView.hh │ │ ├── INamespaceGroup.hh │ │ ├── INamespaceStats.hh │ │ ├── IQuota.hh │ │ ├── IView.hh │ │ ├── Identifiers.hh │ │ ├── LockableNSObject.hh │ │ └── Misc.hh │ ├── locking/ │ │ ├── BulkNsObjectLocker.hh │ │ ├── NSObjectLocker.hh │ │ └── RawPtr.hh │ ├── ns_quarkdb/ │ │ ├── CMakeLists.txt │ │ ├── CacheRefreshListener.cc │ │ ├── CacheRefreshListener.hh │ │ ├── ConfigurationParser.hh │ │ ├── Constants.hh │ │ ├── ContainerMD.cc │ │ ├── ContainerMD.hh │ │ ├── FileMD.cc │ │ ├── FileMD.hh │ │ ├── LRU.hh │ │ ├── NamespaceGroup.cc │ │ ├── NamespaceGroup.hh │ │ ├── NsQuarkdbPlugin.cc │ │ ├── NsQuarkdbPlugin.hh │ │ ├── QClPerformance.cc │ │ ├── QClPerformance.hh │ │ ├── QdbContactDetails.hh │ │ ├── VersionEnforcement.cc │ │ ├── VersionEnforcement.hh │ │ ├── accounting/ │ │ │ ├── ContainerAccounting.cc │ │ │ ├── ContainerAccounting.hh │ │ │ ├── FileSystemHandler.cc │ │ │ ├── FileSystemHandler.hh │ │ │ ├── FileSystemView.cc │ │ │ ├── FileSystemView.hh │ │ │ ├── QuotaNodeCore.cc │ │ │ ├── QuotaNodeCore.hh │ │ │ ├── QuotaStats.cc │ │ │ ├── QuotaStats.hh │ │ │ ├── SetChangeList.hh │ │ │ ├── SyncTimeAccounting.cc │ │ │ └── SyncTimeAccounting.hh │ │ ├── explorer/ │ │ │ ├── NamespaceExplorer.cc │ │ │ └── NamespaceExplorer.hh │ │ ├── flusher/ │ │ │ ├── MetadataFlusher.cc │ │ │ └── MetadataFlusher.hh │ │ ├── inspector/ │ │ │ ├── AttributeExtraction.cc │ │ │ ├── AttributeExtraction.hh │ │ │ ├── ContainerScanner.cc │ │ │ ├── ContainerScanner.hh │ │ │ ├── FileMetadataFilter.cc │ │ │ ├── FileMetadataFilter.hh │ │ │ ├── FileScanner.cc │ │ │ ├── FileScanner.hh │ │ │ ├── Inspector.cc │ │ │ ├── Inspector.hh │ │ │ ├── OutputSink.cc │ │ │ ├── OutputSink.hh │ │ │ ├── Printing.cc │ │ │ └── Printing.hh │ │ ├── persistency/ │ │ │ ├── ContainerMDSvc.cc │ │ │ ├── ContainerMDSvc.hh │ │ │ ├── FileMDSvc.cc │ │ │ ├── FileMDSvc.hh │ │ │ ├── FileSystemIterator.cc │ │ │ ├── FileSystemIterator.hh │ │ │ ├── 
MetadataFetcher.cc │ │ │ ├── MetadataFetcher.hh │ │ │ ├── MetadataProvider.cc │ │ │ ├── MetadataProvider.hh │ │ │ ├── MetadataProviderShard.cc │ │ │ ├── MetadataProviderShard.hh │ │ │ ├── NextInodeProvider.cc │ │ │ ├── NextInodeProvider.hh │ │ │ ├── RequestBuilder.cc │ │ │ ├── RequestBuilder.hh │ │ │ ├── Serialization.cc │ │ │ ├── Serialization.hh │ │ │ ├── UnifiedInodeProvider.cc │ │ │ └── UnifiedInodeProvider.hh │ │ ├── tests/ │ │ │ ├── CMakeLists.txt │ │ │ ├── ContainerMDSvcTest.cc │ │ │ ├── EosNamespaceBenchmark.cc │ │ │ ├── FileMDSvcTest.cc │ │ │ ├── FileSystemViewTest.cc │ │ │ ├── HierarchicalViewTest.cc │ │ │ ├── LruBenchmark.cc │ │ │ ├── Main.cc │ │ │ ├── MetadataFiltering.cc │ │ │ ├── MetadataTests.cc │ │ │ ├── MockContainerMD.hh │ │ │ ├── MockContainerMDSvc.hh │ │ │ ├── MockFileMDSvc.hh │ │ │ ├── Namespace.hh │ │ │ ├── NextInodeProviderTest.cc │ │ │ ├── NsTests.cc │ │ │ ├── NsTests.hh │ │ │ ├── OtherTests.cc │ │ │ ├── README.md │ │ │ ├── TestUtils.hh │ │ │ ├── VariousTests.cc │ │ │ └── utils/ │ │ │ └── break-file.py │ │ ├── tools/ │ │ │ ├── EosConvertToLocalityHashes.cc │ │ │ ├── Fid2PathTool.cc │ │ │ ├── InodeToFidTool.cc │ │ │ └── InspectionTool.cc │ │ ├── utils/ │ │ │ ├── FutureVectorIterator.hh │ │ │ ├── QuotaRecomputer.cc │ │ │ └── QuotaRecomputer.hh │ │ └── views/ │ │ ├── HierarchicalView.cc │ │ └── HierarchicalView.hh │ └── utils/ │ ├── Attributes.hh │ ├── BalanceCalculator.hh │ ├── Buffer.hh │ ├── Checksum.hh │ ├── DataHelper.cc │ ├── DataHelper.hh │ ├── Descriptor.cc │ ├── Descriptor.hh │ ├── Etag.cc │ ├── Etag.hh │ ├── FileListRandomPicker.cc │ ├── FileListRandomPicker.hh │ ├── LocalityHint.hh │ ├── Mode.hh │ ├── PathProcessor.hh │ ├── RenameSafetyCheck.hh │ ├── RmrfHelper.hh │ ├── Stat.hh │ └── StringConvertion.hh ├── nginx/ │ ├── README │ ├── etc/ │ │ ├── init.d/ │ │ │ └── nginx.init │ │ ├── logrotate.d/ │ │ │ └── nginx.logrotate │ │ ├── nginx/ │ │ │ └── nginx.eos.conf.template │ │ ├── sysconfig/ │ │ │ ├── nginx.sysconfig │ │ │ └── 
nginx.sysconfig.systemd │ │ └── systemd/ │ │ └── nginx.service │ ├── jenkins-build.sh │ ├── makesrpm.sh │ ├── nginx-allow-proxy-certs.patch │ ├── nginx-allow-put-redirect.patch │ ├── nginx-no-body-before-redirect.patch │ └── nginx.spec ├── proto/ │ ├── Audit.proto │ ├── CMakeLists.txt │ ├── eos_rest_gateway/ │ │ ├── eos_rest_gateway_service.proto │ │ └── google/ │ │ └── api/ │ │ ├── annotations.proto │ │ └── http.proto │ ├── fst/ │ │ ├── Delete.proto │ │ └── FmdBase.proto │ └── namespace/ │ └── ns_quarkdb/ │ ├── ChangelogEntry.proto │ ├── ContainerMd.proto │ └── FileMd.proto ├── repo/ │ ├── eos-el7-dev.repo │ ├── eos-el7.repo │ ├── eos-el8-dev.repo │ ├── eos-el8.repo │ ├── eos-el8s-dev.repo │ ├── eos-el8s.repo │ ├── eos-el9s-dev.repo │ └── eos-el9s.repo ├── test/ │ ├── CMakeLists.txt │ ├── EosChecksumBenchmark.cc │ ├── EosCryptoTimingTest.cc │ ├── EosHashBenchmark.cc │ ├── EosIdMapBenchmark.cc │ ├── EosLoggingBenchmark.cc │ ├── EosMmap.cc │ ├── EosOpenTruncUpdate.cc │ ├── EosUdpDumper.cc │ ├── TestHmacSha256.cc │ ├── ThreadPoolTest.cc │ ├── XrdCpAbort.cc │ ├── XrdCpAppend.cc │ ├── XrdCpAppendOverlap.cc │ ├── XrdCpBackward.cc │ ├── XrdCpDownloadRandom.cc │ ├── XrdCpExtend.cc │ ├── XrdCpHoles.cc │ ├── XrdCpNonStreaming.cc │ ├── XrdCpPartial.cc │ ├── XrdCpPgRead.cc │ ├── XrdCpPosixCache.cc │ ├── XrdCpRandom.cc │ ├── XrdCpShrink.cc │ ├── XrdCpSlowWriter.cc │ ├── XrdCpTruncate.cc │ ├── XrdCpUpdate.cc │ ├── XrdStress.cc │ ├── XrdStress.hh │ ├── benchmark/ │ │ ├── CMakeLists.txt │ │ ├── ConfigProto.proto │ │ ├── Configuration.cc │ │ ├── Configuration.hh │ │ ├── DirEos.cc │ │ ├── DirEos.hh │ │ ├── FileEos.cc │ │ ├── FileEos.hh │ │ ├── Namespace.hh │ │ ├── ProtoIo.cc │ │ ├── ProtoIo.hh │ │ ├── Result.cc │ │ ├── Result.hh │ │ ├── ResultProto.proto │ │ ├── eosbenchmark.cc │ │ └── eosbenchmark.hh │ ├── eos-accounting-test │ ├── eos-acl-concurrent │ ├── eos-altxs-test │ ├── eos-backup │ ├── eos-backup-browser │ ├── eos-balance-test │ ├── eos-bash │ ├── eos-convert-test │ ├── 
eos-defaultcc-test │ ├── eos-drain-test │ ├── eos-file-cont-detached-test │ ├── eos-fsck-test │ ├── eos-fst-close-test │ ├── eos-groupdrain-test │ ├── eos-grpc-test │ ├── eos-http-upload-test │ ├── eos-https-functional-test │ ├── eos-instance-test │ ├── eos-instance-test-ci │ ├── eos-io-test │ ├── eos-lru-test │ ├── eos-macaroon-init │ ├── eos-manila-test │ ├── eos-oc-test │ ├── eos-quota-test │ ├── eos-rain-test │ ├── eos-rclone-test │ ├── eos-recycle-test │ ├── eos-rename-test │ ├── eos-squash-test │ ├── eos-synctime-test │ ├── eos-test-utils │ ├── eos-timestamp-test │ ├── eos-token-test │ ├── eos-traffic-shaping-test │ ├── eos_io_tool.cc │ ├── eoscp-rain-test │ ├── fuse/ │ │ └── eos-fuse-test │ ├── fusex/ │ │ ├── eos-fusex-functional-test │ │ └── eos-test-credential-bindings │ ├── microbenchmarks/ │ │ ├── CMakeLists.txt │ │ ├── README.md │ │ ├── common/ │ │ │ ├── BM_AtomicPtr.cc │ │ │ ├── BM_IdMap.cc │ │ │ ├── BM_Random.cc │ │ │ ├── BM_StringUtils.cc │ │ │ ├── BM_ThreadId.cc │ │ │ └── BM_XrdString.cc │ │ ├── main.cc │ │ ├── mgm/ │ │ │ ├── BM_FlatScheduler.cc │ │ │ └── BM_RRSeed.cc │ │ └── namespace/ │ │ └── ns_quarkdb/ │ │ ├── BM_NSLocking.cc │ │ └── README.md │ ├── mq/ │ │ ├── SharedHashLoadTest.cc │ │ ├── XrdMqClientMaster.cc │ │ ├── XrdMqClientTest.cc │ │ ├── XrdMqClientWorker.cc │ │ ├── XrdMqQueueDumper.cc │ │ ├── XrdMqQueueFeeder.cc │ │ ├── XrdMqQueueInjection.cc │ │ ├── XrdMqSharedObjectBroadCastClient.cc │ │ ├── XrdMqSharedObjectClient.cc │ │ └── XrdMqSharedObjectQueueClient.cc │ ├── test-eos-iam-mapfile.py │ └── xrdstress ├── test.cmake ├── unit_tests/ │ ├── CMakeLists.txt │ ├── README.md │ ├── auth_plugin/ │ │ ├── AuthFsTest.cc │ │ ├── Namespace.hh │ │ ├── TestEnv.cc │ │ └── TestEnv.hh │ ├── common/ │ │ ├── AuditTests.cc │ │ ├── BackOffInvokerTests.cc │ │ ├── BufferManagerTests.cc │ │ ├── ConcurrentQueueTests.cc │ │ ├── ConfigStoreTests.cc │ │ ├── ConfigTests.cc │ │ ├── ContainerUtilsTests.cc │ │ ├── CounterTests.cc │ │ ├── EosTokenTests.cc │ │ ├── 
FileIdTests.cc │ │ ├── FileMapTests.cc │ │ ├── FutureWrapperTests.cc │ │ ├── GlobTests.cc │ │ ├── InodeTests.cc │ │ ├── LoggingTests.cc │ │ ├── LoggingTestsUtils.cc │ │ ├── MappingTestFixture.hh │ │ ├── MappingTests.cc │ │ ├── MemConfigStore.hh │ │ ├── Namespace.hh │ │ ├── ObserverMgrTests.cc │ │ ├── PathTests.cc │ │ ├── RWMutexTest.cc │ │ ├── RandTests.cc │ │ ├── RateLimitTests.cc │ │ ├── RegexWrapperTests.cc │ │ ├── SciTokensTests.cc │ │ ├── ShardedCacheTests.cc │ │ ├── StringConversionTests.cc │ │ ├── StringSplitTests.cc │ │ ├── StringTokenizerTests.cc │ │ ├── StringUtilsTests.cc │ │ ├── SymKeysTests.cc │ │ ├── ThreadPoolTest.cc │ │ ├── TimingTests.cc │ │ ├── UriCapCipherTests.cc │ │ ├── UtilsTests.cc │ │ ├── VariousTests.cc │ │ ├── WebNotifyTests.cc │ │ ├── XrdConnPoolTests.cc │ │ ├── async/ │ │ │ ├── ExecutorMgrTests.cc │ │ │ ├── FollyExecutorFixture.hh │ │ │ └── OpaqueFutureTests.cc │ │ └── concurrency/ │ │ ├── AlignedAtomicArrayTests.cc │ │ ├── AtomicUniquePtrTests.cc │ │ ├── RCUTests.cc │ │ └── ThreadEpochCounterTests.cc │ ├── console/ │ │ ├── AclCmdTest.cc │ │ ├── CmdsTests.cc │ │ ├── ConsoleCompletionTest.cc │ │ ├── ConsoleUtilTests.cc │ │ ├── ParseCommentTest.cc │ │ └── RegexUtilTest.cc │ ├── fst/ │ │ ├── HealthTest.cc │ │ ├── HttpHandlerFstFileCacheTests.cc │ │ ├── LoadTests.cc │ │ ├── MonitorVarPartitionTest.cc │ │ ├── Namespace.hh │ │ ├── NfsIoTests.cc │ │ ├── ResponseCollectorTests.cc │ │ ├── ScanDirTests.cc │ │ ├── TestEnv.cc │ │ ├── TestEnv.hh │ │ ├── TmpDirTree.hh │ │ ├── UtilsTest.cc │ │ ├── WalkDirTreeTests.cc │ │ ├── XrdFstOfsFileInternalTest.cc │ │ ├── XrdFstOfsFileTest.cc │ │ ├── XrdFstOfsTests.cc │ │ ├── XrdFstOssFileTest.cc │ │ ├── XrdIoTests.cc │ │ └── main_fst.cc │ ├── fusex/ │ │ └── StatTests.cc │ ├── mgm/ │ │ ├── AccessTests.cc │ │ ├── AclCmdTests.cc │ │ ├── CapsTests.cc │ │ ├── CommitHelperTests.cc │ │ ├── ConversionInfoTests.cc │ │ ├── CtaUtilsTests.cc │ │ ├── EgroupTests.cc │ │ ├── FileSystemRegistryTests.cc │ │ ├── FsViewTests.cc │ 
│ ├── FsckEntryTests.cc │ │ ├── FusexCastBatchTests.cc │ │ ├── HttpTests.cc │ │ ├── IdTrackerTests.cc │ │ ├── IostatTests.cc │ │ ├── LRUTests.cc │ │ ├── LockTrackerTests.cc │ │ ├── PolicyTests.cc │ │ ├── ProcFsTests.cc │ │ ├── QuarkDBConfigTests.cc │ │ ├── RecyclePolicyTests.cc │ │ ├── RecycleTests.cc │ │ ├── RoutingTests.cc │ │ ├── XrdMgmOfsFileTests.cc │ │ ├── XrdMgmOfsTests.cc │ │ ├── bulk-request/ │ │ │ ├── BulkRequestPrepareManagerTest.cc │ │ │ ├── MockPrepareMgmFSInterface.cc │ │ │ ├── MockPrepareMgmFSInterface.hh │ │ │ ├── PrepareManagerTest.cc │ │ │ └── PrepareManagerTest.hh │ │ ├── groupbalancer/ │ │ │ ├── BalancerEngineTypeTests.cc │ │ │ ├── FreeSpaceBalancerTests.cc │ │ │ ├── GroupBalancerUtilsTests.cc │ │ │ ├── GroupsInfoFetcherTests.cc │ │ │ ├── MinMaxBalancerEngineTests.cc │ │ │ ├── StdDevBalancerEngineTests.cc │ │ │ └── StdDrainerTests.cc │ │ ├── groupdrainer/ │ │ │ ├── DrainProgressTrackerTests.cc │ │ │ ├── GroupDrainerRetry.cc │ │ │ └── GroupDrainerTests.cc │ │ ├── http/ │ │ │ ├── HttpServerTests.cc │ │ │ └── rest-api/ │ │ │ └── tape/ │ │ │ ├── JsonCPPTapeModelBuilderTest.cc │ │ │ ├── JsonCPPTapeModelBuilderTest.hh │ │ │ ├── RestApiTest.cc │ │ │ └── RestApiTest.hh │ │ ├── placement/ │ │ │ ├── ClusterMapFixture.hh │ │ │ ├── ClusterMapTests.cc │ │ │ ├── FsSchedulerTests.cc │ │ │ ├── PlacementStrategyTests.cc │ │ │ ├── RRSeedTests.cc │ │ │ ├── SchedulerTests.cc │ │ │ └── ThreadLocalRRSeedTests.cc │ │ ├── tgc/ │ │ │ ├── CachedValueTests.cc │ │ │ ├── FreedBytesHistogramTests.cc │ │ │ ├── LruTests.cc │ │ │ ├── MultiSpaceTapeGcTests.cc │ │ │ ├── SmartSpaceStatsTests.cc │ │ │ ├── SpaceToTapeGcMapTests.cc │ │ │ └── TapeGcTests.cc │ │ └── utils/ │ │ └── AttrHelperTests.cc │ └── with_qdb/ │ ├── Main.cc │ ├── TestUtils.cc │ ├── TestUtils.hh │ └── configuration.cc └── utils/ ├── CMakeLists.txt ├── README.osx ├── astylerc ├── centos7-dev-environment.sh ├── centos8-dev-environment.sh ├── clang-format-diff.py ├── el7-packages.sh ├── el9-dev-environment.sh ├── 
eos-cdmi-setup.sh ├── eos-fst-clean ├── eos-log-clean ├── eos-mgm-clean ├── eos-osx-package-prepare.sh ├── eos-osx-package.sh ├── eos-ports-block ├── eos-tty-broadcast ├── eos-uninstall ├── eos-xrootd-install.sh ├── eosx ├── filter-trace/ │ ├── .gitignore │ ├── eos-filter-stacktrace │ └── test-eos-filter-stacktrace.py ├── flamegraph/ │ ├── eos-make-flamegraph │ ├── eos-util-flamegraph │ └── eos-util-stackcollapse ├── get-xrootd-git-master.sh ├── make-keytab ├── replace-in-sources ├── route-http └── zstdtail.cc ================================================ FILE CONTENTS ================================================ ================================================ FILE: .clang-format ================================================ Language: Cpp AccessModifierOffset: -2 AlignAfterOpenBracket: Align AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignOperands: Align AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: true AllowShortBlocksOnASingleLine: Never AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: All AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterDefinitionReturnType: None AlwaysBreakAfterReturnType: AllDefinitions AlwaysBreakBeforeMultilineStrings: false AlwaysBreakTemplateDeclarations: Yes BinPackArguments: true BinPackParameters: true BraceWrapping: AfterClass: false AfterControlStatement: false AfterEnum: false AfterFunction: true AfterNamespace: false AfterObjCDeclaration: false AfterStruct: false AfterUnion: false BeforeCatch: false BeforeElse: false IndentBraces: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Custom BreakBeforeTernaryOperators: true BreakConstructorInitializersBeforeComma: true BreakAfterJavaFieldAnnotations: false BreakStringLiterals: true ColumnLimit: 90 CommentPragmas: '^ IWYU pragma:' ConstructorInitializerAllOnOneLineOrOnePerLine: false ConstructorInitializerIndentWidth: 4 ContinuationIndentWidth: 
4 Cpp11BracedListStyle: true DerivePointerAlignment: false DisableFormat: false ExperimentalAutoDetectBinPacking: false ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] IncludeCategories: - Regex: '^"(llvm|llvm-c|clang|clang-c)/' Priority: 2 - Regex: '^(<|"(gtest|isl|json)/)' Priority: 3 - Regex: '.*' Priority: 1 IncludeIsMainRegex: '$' IndentCaseLabels: false IndentWidth: 2 IndentWrappedFunctionNames: false KeepEmptyLinesAtTheStartOfBlocks: true MacroBlockBegin: '' MacroBlockEnd: '' MaxEmptyLinesToKeep: 1 NamespaceIndentation: None ObjCBlockIndentWidth: 2 ObjCSpaceAfterProperty: false ObjCSpaceBeforeProtocolList: true PenaltyBreakBeforeFirstCallParameter: 19 PenaltyBreakComment: 300 PenaltyBreakFirstLessLess: 120 PenaltyBreakString: 1000 PenaltyExcessCharacter: 1000000 PenaltyReturnTypeOnItsOwnLine: 60 PointerAlignment: Left ReflowComments: Always SortIncludes: true SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 1 SpacesInAngles: false SpacesInContainerLiterals: true SpacesInCStyleCastParentheses: false SpacesInParentheses: false SpacesInSquareBrackets: false Standard: c++17 TabWidth: 8 UseTab: Never JavaScriptQuotes: Leave InsertBraces: true ================================================ FILE: .clang-tidy ================================================ --- Checks: 'clang-diagnostic-*,clang-analyzer-*,-clang-analyzer-alpha*,*,-cppcoreguidelines-pro-bounds-pointer-arithmetic,-cert-err61-cpp,-misc-throw-by-value-catch-by-reference,-clang-analyzer-alpha.deadcode.UnreachableCode,-cert-err58-cpp,-clang-analyzer-alpha.*,-clang-analyzer-security.insecureAPI.strcpy,-cppcoreguidelines-pro-type-vararg,-cppcoreguidelines-pro-type-reinterpret-cast,-google-runtime-int,-modernize-raw-string-literal,-cppcoreguidelines-pro-bounds-constant-array-index,-llvmlibc-*' WarningsAsErrors: '' HeaderFilterRegex: '' AnalyzeTemporaryDtors: false CheckOptions: - key: 
cert-dcl59-cpp.HeaderFileExtensions value: h,hh,hpp,hxx - key: cert-err61-cpp.CheckThrowTemporaries value: '1' - key: cert-oop11-cpp.IncludeStyle value: llvm - key: cert-oop11-cpp.UseCERTSemantics value: '1' - key: cppcoreguidelines-pro-bounds-constant-array-index.GslHeader value: '' - key: cppcoreguidelines-pro-bounds-constant-array-index.IncludeStyle value: '0' - key: cppcoreguidelines-pro-type-member-init.IgnoreArrays value: '0' - key: google-build-namespaces.HeaderFileExtensions value: h,hh,hpp,hxx - key: google-global-names-in-headers.HeaderFileExtensions value: h - key: google-readability-braces-around-statements.ShortStatementLines value: '1' - key: google-readability-function-size.BranchThreshold value: '4294967295' - key: google-readability-function-size.LineThreshold value: '4294967295' - key: google-readability-function-size.StatementThreshold value: '800' - key: google-readability-namespace-comments.ShortNamespaceLines value: '10' - key: google-readability-namespace-comments.SpacesBeforeComments value: '2' - key: google-runtime-int.SignedTypePrefix value: int - key: google-runtime-int.TypeSuffix value: '' - key: google-runtime-int.UnsignedTypePrefix value: uint - key: llvm-namespace-comment.ShortNamespaceLines value: '1' - key: llvm-namespace-comment.SpacesBeforeComments value: '1' - key: misc-assert-side-effect.AssertMacros value: assert - key: misc-assert-side-effect.CheckFunctionCalls value: '0' - key: misc-dangling-handle.HandleClasses value: 'std::basic_string_view;std::experimental::basic_string_view' - key: misc-definitions-in-headers.HeaderFileExtensions value: ',h,hh,hpp,hxx' - key: misc-definitions-in-headers.UseHeaderFileExtension value: '1' - key: misc-misplaced-widening-cast.CheckImplicitCasts value: '1' - key: misc-move-constructor-init.IncludeStyle value: llvm - key: misc-move-constructor-init.UseCERTSemantics value: '0' - key: misc-sizeof-expression.WarnOnSizeOfCompareToConstant value: '1' - key: 
misc-sizeof-expression.WarnOnSizeOfConstant value: '1' - key: misc-sizeof-expression.WarnOnSizeOfThis value: '1' - key: misc-string-constructor.LargeLengthThreshold value: '8388608' - key: misc-string-constructor.WarnOnLargeLength value: '1' - key: misc-suspicious-missing-comma.MaxConcatenatedTokens value: '5' - key: misc-suspicious-missing-comma.RatioThreshold value: '0.200000' - key: misc-suspicious-missing-comma.SizeThreshold value: '5' - key: misc-suspicious-string-compare.StringCompareLikeFunctions value: '' - key: misc-suspicious-string-compare.WarnOnImplicitComparison value: '1' - key: misc-suspicious-string-compare.WarnOnLogicalNotComparison value: '0' - key: misc-throw-by-value-catch-by-reference.CheckThrowTemporaries value: '1' - key: modernize-loop-convert.MaxCopySize value: '16' - key: modernize-loop-convert.MinConfidence value: reasonable - key: modernize-loop-convert.NamingStyle value: CamelCase - key: modernize-pass-by-value.IncludeStyle value: llvm - key: modernize-replace-auto-ptr.IncludeStyle value: llvm - key: modernize-use-nullptr.NullMacros value: 'NULL' - key: performance-faster-string-find.StringLikeClasses value: 'std::basic_string' - key: performance-for-range-copy.WarnOnAllAutoCopies value: '0' - key: readability-braces-around-statements.ShortStatementLines value: '1' - key: readability-function-size.BranchThreshold value: '4294967295' - key: readability-function-size.LineThreshold value: '4294967295' - key: readability-function-size.StatementThreshold value: '800' - key: readability-identifier-naming.AbstractClassCase value: aNy_CasE - key: readability-identifier-naming.AbstractClassPrefix value: '' - key: readability-identifier-naming.AbstractClassSuffix value: '' - key: readability-identifier-naming.ClassCase value: aNy_CasE - key: readability-identifier-naming.ClassConstantCase value: aNy_CasE - key: readability-identifier-naming.ClassConstantPrefix value: '' - key: readability-identifier-naming.ClassConstantSuffix value: '' - key: 
readability-identifier-naming.ClassMemberCase value: aNy_CasE - key: readability-identifier-naming.ClassMemberPrefix value: '' - key: readability-identifier-naming.ClassMemberSuffix value: '' - key: readability-identifier-naming.ClassMethodCase value: aNy_CasE - key: readability-identifier-naming.ClassMethodPrefix value: '' - key: readability-identifier-naming.ClassMethodSuffix value: '' - key: readability-identifier-naming.ClassPrefix value: '' - key: readability-identifier-naming.ClassSuffix value: '' - key: readability-identifier-naming.ConstantCase value: aNy_CasE - key: readability-identifier-naming.ConstantMemberCase value: aNy_CasE - key: readability-identifier-naming.ConstantMemberPrefix value: '' - key: readability-identifier-naming.ConstantMemberSuffix value: '' - key: readability-identifier-naming.ConstantParameterCase value: aNy_CasE - key: readability-identifier-naming.ConstantParameterPrefix value: '' - key: readability-identifier-naming.ConstantParameterSuffix value: '' - key: readability-identifier-naming.ConstantPrefix value: '' - key: readability-identifier-naming.ConstantSuffix value: '' - key: readability-identifier-naming.ConstexprFunctionCase value: aNy_CasE - key: readability-identifier-naming.ConstexprFunctionPrefix value: '' - key: readability-identifier-naming.ConstexprFunctionSuffix value: '' - key: readability-identifier-naming.ConstexprMethodCase value: aNy_CasE - key: readability-identifier-naming.ConstexprMethodPrefix value: '' - key: readability-identifier-naming.ConstexprMethodSuffix value: '' - key: readability-identifier-naming.ConstexprVariableCase value: aNy_CasE - key: readability-identifier-naming.ConstexprVariablePrefix value: '' - key: readability-identifier-naming.ConstexprVariableSuffix value: '' - key: readability-identifier-naming.EnumCase value: aNy_CasE - key: readability-identifier-naming.EnumConstantCase value: aNy_CasE - key: readability-identifier-naming.EnumConstantPrefix value: '' - key: 
readability-identifier-naming.EnumConstantSuffix value: '' - key: readability-identifier-naming.EnumPrefix value: '' - key: readability-identifier-naming.EnumSuffix value: '' - key: readability-identifier-naming.FunctionCase value: aNy_CasE - key: readability-identifier-naming.FunctionPrefix value: '' - key: readability-identifier-naming.FunctionSuffix value: '' - key: readability-identifier-naming.GlobalConstantCase value: aNy_CasE - key: readability-identifier-naming.GlobalConstantPrefix value: '' - key: readability-identifier-naming.GlobalConstantSuffix value: '' - key: readability-identifier-naming.GlobalFunctionCase value: aNy_CasE - key: readability-identifier-naming.GlobalFunctionPrefix value: '' - key: readability-identifier-naming.GlobalFunctionSuffix value: '' - key: readability-identifier-naming.GlobalVariableCase value: aNy_CasE - key: readability-identifier-naming.GlobalVariablePrefix value: '' - key: readability-identifier-naming.GlobalVariableSuffix value: '' - key: readability-identifier-naming.IgnoreFailedSplit value: '0' - key: readability-identifier-naming.InlineNamespaceCase value: aNy_CasE - key: readability-identifier-naming.InlineNamespacePrefix value: '' - key: readability-identifier-naming.InlineNamespaceSuffix value: '' - key: readability-identifier-naming.LocalConstantCase value: aNy_CasE - key: readability-identifier-naming.LocalConstantPrefix value: '' - key: readability-identifier-naming.LocalConstantSuffix value: '' - key: readability-identifier-naming.LocalVariableCase value: aNy_CasE - key: readability-identifier-naming.LocalVariablePrefix value: '' - key: readability-identifier-naming.LocalVariableSuffix value: '' - key: readability-identifier-naming.MemberCase value: aNy_CasE - key: readability-identifier-naming.MemberPrefix value: '' - key: readability-identifier-naming.MemberSuffix value: '' - key: readability-identifier-naming.MethodCase value: aNy_CasE - key: readability-identifier-naming.MethodPrefix value: '' - key: 
readability-identifier-naming.MethodSuffix value: '' - key: readability-identifier-naming.NamespaceCase value: aNy_CasE - key: readability-identifier-naming.NamespacePrefix value: '' - key: readability-identifier-naming.NamespaceSuffix value: '' - key: readability-identifier-naming.ParameterCase value: aNy_CasE - key: readability-identifier-naming.ParameterPackCase value: aNy_CasE - key: readability-identifier-naming.ParameterPackPrefix value: '' - key: readability-identifier-naming.ParameterPackSuffix value: '' - key: readability-identifier-naming.ParameterPrefix value: '' - key: readability-identifier-naming.ParameterSuffix value: '' - key: readability-identifier-naming.PrivateMemberCase value: aNy_CasE - key: readability-identifier-naming.PrivateMemberPrefix value: '' - key: readability-identifier-naming.PrivateMemberSuffix value: '' - key: readability-identifier-naming.PrivateMethodCase value: aNy_CasE - key: readability-identifier-naming.PrivateMethodPrefix value: '' - key: readability-identifier-naming.PrivateMethodSuffix value: '' - key: readability-identifier-naming.ProtectedMemberCase value: aNy_CasE - key: readability-identifier-naming.ProtectedMemberPrefix value: '' - key: readability-identifier-naming.ProtectedMemberSuffix value: '' - key: readability-identifier-naming.ProtectedMethodCase value: aNy_CasE - key: readability-identifier-naming.ProtectedMethodPrefix value: '' - key: readability-identifier-naming.ProtectedMethodSuffix value: '' - key: readability-identifier-naming.PublicMemberCase value: aNy_CasE - key: readability-identifier-naming.PublicMemberPrefix value: '' - key: readability-identifier-naming.PublicMemberSuffix value: '' - key: readability-identifier-naming.PublicMethodCase value: aNy_CasE - key: readability-identifier-naming.PublicMethodPrefix value: '' - key: readability-identifier-naming.PublicMethodSuffix value: '' - key: readability-identifier-naming.StaticConstantCase value: aNy_CasE - key: 
readability-identifier-naming.StaticConstantPrefix value: '' - key: readability-identifier-naming.StaticConstantSuffix value: '' - key: readability-identifier-naming.StaticVariableCase value: aNy_CasE - key: readability-identifier-naming.StaticVariablePrefix value: '' - key: readability-identifier-naming.StaticVariableSuffix value: '' - key: readability-identifier-naming.StructCase value: aNy_CasE - key: readability-identifier-naming.StructPrefix value: '' - key: readability-identifier-naming.StructSuffix value: '' - key: readability-identifier-naming.TemplateParameterCase value: aNy_CasE - key: readability-identifier-naming.TemplateParameterPrefix value: '' - key: readability-identifier-naming.TemplateParameterSuffix value: '' - key: readability-identifier-naming.TemplateTemplateParameterCase value: aNy_CasE - key: readability-identifier-naming.TemplateTemplateParameterPrefix value: '' - key: readability-identifier-naming.TemplateTemplateParameterSuffix value: '' - key: readability-identifier-naming.TypeTemplateParameterCase value: aNy_CasE - key: readability-identifier-naming.TypeTemplateParameterPrefix value: '' - key: readability-identifier-naming.TypeTemplateParameterSuffix value: '' - key: readability-identifier-naming.TypedefCase value: aNy_CasE - key: readability-identifier-naming.TypedefPrefix value: '' - key: readability-identifier-naming.TypedefSuffix value: '' - key: readability-identifier-naming.UnionCase value: aNy_CasE - key: readability-identifier-naming.UnionPrefix value: '' - key: readability-identifier-naming.UnionSuffix value: '' - key: readability-identifier-naming.ValueTemplateParameterCase value: aNy_CasE - key: readability-identifier-naming.ValueTemplateParameterPrefix value: '' - key: readability-identifier-naming.ValueTemplateParameterSuffix value: '' - key: readability-identifier-naming.VariableCase value: aNy_CasE - key: readability-identifier-naming.VariablePrefix value: '' - key: readability-identifier-naming.VariableSuffix value: '' - 
key: readability-identifier-naming.VirtualMethodCase value: aNy_CasE - key: readability-identifier-naming.VirtualMethodPrefix value: '' - key: readability-identifier-naming.VirtualMethodSuffix value: '' - key: readability-simplify-boolean-expr.ChainedConditionalAssignment value: '0' - key: readability-simplify-boolean-expr.ChainedConditionalReturn value: '0' ================================================ FILE: .codeclimate.yml ================================================ plugins: cppcheck: enabled: true ================================================ FILE: .ctest/config.cmake ================================================ # This file is meant to contain set commands for options # you'd normally set at configuration when calling CMake. # For example, to compile with C++20, uncomment the line below # set(CMAKE_CXX_STANDARD 20 CACHE STRING "C++Standard") ================================================ FILE: .git-blame-ignore-revs ================================================ # This file contains a list of commits that should be ignored by some git commands including `git blame`. # You may need to enable this feature in your git configuration by running: # - git config blame.ignoreRevsFile .git-blame-ignore-revs # Web interfaces of git such as GitHub and GitLab also support this feature. # Commits that do not add or change any functionality but only reformat the code or fix typos should be added to this file. This way, when you run `git blame` on a file, you won't see these commits and can focus on the commits that actually changed the code. 
# The following command can be run to check if any of the commits in this file are missing from the history: # - for rev in $(grep -vE '^#|^$' .git-blame-ignore-revs); do git rev-parse -q --verify "${rev}^{commit}" >/dev/null || echo "Missing: $rev"; done fd828b6155b3caf28676b4568065c0911529618b f4b1931dd80ca8708b7802080eb468d5f7a19bda 683def495afe2567b108fb0de9ad8d3eb1ec43c8 ================================================ FILE: .gitignore ================================================ *.lo *.o *.la *.pyc .libs .deps Makefile Makefile.in Console/eos console/iam.cfg XrdMqOfs/xrdmqclientmaster XrdMqOfs/xrdmqclienttest XrdMqOfs/xrdmqclientworker XrdMqOfs/xrdmqcryptotest XrdMqOfs/xrdmqdumper aclocal.m4 autom4te.cache config.guess config.log config.status config.sub configure depcomp eos.spec install-sh libtool ltmain.sh missing doxy.log doxydoc *~ Namespace/tests/text_runner valgrind.supp eos-log-repair /build*/ __pycache__ kineticio-dist.tgz # clion specific configs /cmake-build*/ /.idea/ # eclipse specific configs /.settings/ .cproject .project .vscode nbproject my_clang_cache.cmake # ccache specific configs /ccache/ # documentation artifacts doc/_build/ doc/html/ debian/control # ApMon specific ApMon/*.tar.gz ApMon/rpmbuild* ApMon/eos-apmon-* !ApMon/Makefile # clangd file compile_commands.json .cache ================================================ FILE: .gitlab-ci.yml ================================================ # ************************************************************************ # * EOS - the CERN Disk Storage System * # * Copyright (C) 2023 CERN/Switzerland * # * * # * This program is free software: you can redistribute it and/or modify * # * it under the terms of the GNU General Public License as published by * # * the Free Software Foundation, either version 3 of the License, or * # * (at your option) any later version. 
* # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU General Public License for more details. * # * * # * You should have received a copy of the GNU General Public License * # * along with this program. If not, see <http://www.gnu.org/licenses/>.* # ************************************************************************ stages: - build:manual - pre-commit - prebuild - build:rpm - build:dockerimage - test - publish - clean variables: CODENAME: "diopside" default: interruptible: true before_script: - source gitlab-ci/export_commit-type.sh - echo "Exporting COMMIT_TYPE=${COMMIT_TYPE}" include: # - template: Code-Quality.gitlab-ci.yml # - local: /gitlab-ci/.gitlab-ci-test-dock_include.yml @note on the file # - local: /gitlab-ci/.gitalb-ci-build-macos.yml - local: /gitlab-ci/.gitlab-ci-build-ubuntu.yml - local: /gitlab-ci/.gitlab-ci-test-k8s_include.yml - local: /gitlab-ci/.gitlab-ci-test-helm_include.yml workflow: auto_cancel: on_new_commit: interruptible rules: - if: $CI_COMMIT_BRANCH variables: #KOJI_SCRATCH: "--scratch --skip-tag" KOJI_SCRATCH: "--scratch" - if: $CI_COMMIT_TAG variables: KOJI_SCRATCH: "" #------------------------------------------------------------------------------- # Prebuild #------------------------------------------------------------------------------- .doc-skip: stage: .pre script: - | if git diff --name-only $CI_COMMIT_BEFORE_SHA..$CI_COMMIT_SHA | grep -qv '^docs/'; then echo "Non doc changes detected, running full pipeline" exit 0 fi echo "Documentation-only changes detected. Skipping pipeline." # In an ideal world this job failing at the top should not trigger builds # we don't live in that world! 
In order to avoid creating more complex # dependency graphs, we just cancel the pipeline curl -X POST -H "PRIVATE-TOKEN: $GITLAB_CI_TOKEN" "$CI_API_V4_URL/projects/$CI_PROJECT_ID/pipelines/$CI_PIPELINE_ID/cancel" exit 1 allow_failure: true rules: - if: '$CI_PIPELINE_SOURCE == "schedule"' when: never - changes: - docs/**/* when: always - when: never .prebuild-template: &prebuild-template_definition stage: prebuild image: name: gcr.io/kaniko-project/executor:debug entrypoint: [""] script: - export DESTINATION="gitlab-registry.cern.ch/dss/eos/prebuild-${PREBUILD_NAME}-${CODENAME}" - export DOCKERFILE="$CI_PROJECT_DIR/gitlab-ci/prebuild_OSbase/prebuild-${PREBUILD_NAME}.Dockerfile" - echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n $CI_REGISTRY_USER:$CI_REGISTRY_PASSWORD | base64)\"}}}" > /kaniko/.docker/config.json # no need yet for --build-arg PREBUILD_NAME="$PREBUILD_NAME" --build-arg CMAKE_OPTIONS="$CMAKE_OPTIONS" --build-arg CXXFLAGS="$CXXFLAGS" - /kaniko/executor --cache="false" --destination $DESTINATION --dockerfile $DOCKERFILE --context $CI_PROJECT_DIR --build-arg=EOS_CODENAME="${CODENAME}" --compressed-caching=false --use-new-run only: variables: - $PREBUILD_TRIGGER prebuild-el8: extends: .prebuild-template variables: PREBUILD_NAME: el8 prebuild-el9: extends: .prebuild-template variables: PREBUILD_NAME: el9 prebuild-el10: extends: .prebuild-template variables: PREBUILD_NAME: el10 prebuild-el9-arm64: extends: .prebuild-template variables: PREBUILD_NAME: el9-arm64 tags: - k8s-arm .prebuild-el9_coverage: extends: .prebuild-template variables: PREBUILD_NAME: el9_coverage only: variables: - $COVERAGE_SCHEDULE clone_docker: stage: build:rpm image: gitlab-registry.cern.ch/linuxsupport/alma9-base script: - dnf install --nogpg -y git - git clone https://gitlab.cern.ch/eos/eos-docker.git artifacts: expire_in: 1 day paths: - eos-docker/ #------------------------------------------------------------------------------- # Build RPMs 
#------------------------------------------------------------------------------- .build-template: &build-template_definition stage: build:rpm variables: PKG_MGR: dnf CMAKE_BIN: cmake script: - git submodule sync --recursive && git submodule update --init -f --recursive - mkdir build; cd build; ${CMAKE_BIN} .. -DPACKAGEONLY=1 -DEOS_GRPC_GW=1 -Wno-dev; make srpm; cd ..; - echo -e "[eos-depend]\nname=EOS dependencies\nbaseurl=http://storage-ci.web.cern.ch/storage-ci/eos/${CODENAME}-depend/el-$(rpm --eval '%{rhel}')/$(uname -m)/\ngpgcheck=0\nenabled=1\npriority=2\n" >> /etc/yum.repos.d/eos-depend.repo - | if [[ ${PKG_MGR} == "yum" ]]; then ${PKG_MGR} remove --nogpgcheck -y eos-xrootd; ${PKG_MGR}-builddep --nogpgcheck -y --setopt="cern*.exclude=xrootd*" build/SRPMS/*; else ${PKG_MGR} builddep --nogpgcheck --allowerasing -y --setopt="cern*.exclude=xrootd*" build/SRPMS/*; fi - | if [[ -n "$CI_COMMIT_TAG" ]]; then export CCACHE_DISABLE=1; ${PKG_MGR} install -y gnupg2; else source gitlab-ci/setup_ccache.sh; fi - rpmbuild --rebuild --with server --with eos_grpc_gateway --define "_rpmdir build/RPMS/" --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" build/SRPMS/* | (ts 2> /dev/null || true; tee) - ccache -s - if [[ -n "$CI_COMMIT_TAG" ]]; then gpg2 --batch --import $STCI_REPO_KEY; printf "" | setsid rpmsign --define='%_gpg_name stci@cern.ch' --define='%_signature gpg' --addsign build/RPMS/*.rpm; fi - mkdir ${BUILD_NAME}_artifacts; cp -rv build/*RPMS/ build/eos-*.tar.gz ${BUILD_NAME}_artifacts cache: key: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" paths: - ccache/ artifacts: expire_in: 60 days paths: - ${BUILD_NAME}_artifacts/ build_el8: image: gitlab-registry.cern.ch/dss/eos/prebuild-el8-${CODENAME} variables: BUILD_NAME: el-8 extends: .build-template only: - schedules - tags build_el9: image: gitlab-registry.cern.ch/dss/eos/prebuild-el9-${CODENAME} variables: BUILD_NAME: el-9 extends: .build-template build_el10: image: 
gitlab-registry.cern.ch/dss/eos/prebuild-el10-${CODENAME} variables: BUILD_NAME: el-10 extends: .build-template only: - schedules - tags build_el9_arm64: image: gitlab-registry.cern.ch/dss/eos/prebuild-el9-arm64-${CODENAME} variables: BUILD_NAME: el-9-arm64 extends: .build-template tags: - k8s-arm only: - schedules - tags #------------------------------------------------------------------------------- # EOS client builds for RHEL #------------------------------------------------------------------------------- .build-client-srpm-template: &build-client-srpm-template_definition stage: build:rpm script: - dnf config-manager --add-repo https://linuxsoft.cern.ch/cern/rhel/$(rpm --eval '%{rhel}')/CERN/$(uname -m)/ --set-enabled - dnf install cern-gpg-keys --nogpgcheck -y - rpm --import /etc/pki/rpm-gpg/* || true - dnf install rpm-build cmake gcc-c++ git -y - git submodule sync --recursive && git submodule update --init -f --recursive - mkdir -pv build; cd build; - cmake ../ -DPACKAGEONLY=1 -DCLIENT=1 -Wno-dev - make srpm; cd ..; - mkdir -p ${CI_JOB_NAME}_artifacts - cp -rv build/SRPMS/ ${CI_JOB_NAME}_artifacts artifacts: expire_in: 60 days paths: - ${CI_JOB_NAME}_artifacts only: - schedules - tags rh-8: extends: .build-client-srpm-template image: gitlab-registry.cern.ch/linuxsupport/ubi8/ubi rh-9: extends: .build-client-srpm-template image: gitlab-registry.cern.ch/linuxsupport/ubi9/ubi rh-10: extends: .build-client-srpm-template image: gitlab-registry.cern.ch/linuxsupport/ubi10/ubi #------------------------------------------------------------------------------- # Fedora builds #------------------------------------------------------------------------------- .build-fedora-template: &build-fedora-template_definition stage: build:rpm script: - dnf install --nogpg -y gcc-c++ cmake make rpm-build which git tar dnf-plugins-core ccache rpm-sign - git submodule sync --recursive && git submodule update --init -f --recursive - mkdir build; cd build - cmake .. 
-DPACKAGEONLY=1 -Wno-dev; make srpm; cd .. - echo -e "[eos-depend]\nname=EOS dependencies\nbaseurl=http://storage-ci.web.cern.ch/storage-ci/eos/${CODENAME}-depend/${BUILD_NAME}/x86_64/\ngpgcheck=0\nenabled=1\nexclude=xrootd*\npriority=4\n" > /etc/yum.repos.d/eos-depend.repo - dnf builddep --nogpgcheck --allowerasing -y build/SRPMS/* - if [[ -n "$CI_COMMIT_TAG" ]]; then export CCACHE_DISABLE=1; else source gitlab-ci/setup_ccache_fc.sh; fi - rpmbuild --rebuild --with server --define "_rpmdir build/RPMS/" --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" build/SRPMS/* - ccache -s - if [[ -n "$CI_COMMIT_TAG" ]]; then gpg2 --batch --import $STCI_REPO_KEY; printf "" | setsid rpmsign --define='%_gpg_name stci@cern.ch' --define='%_signature gpg' --addsign build/RPMS/*.rpm; fi - mkdir ${BUILD_NAME}_artifacts; cp -R build/SRPMS build/RPMS ${BUILD_NAME}_artifacts cache: key: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" paths: - ccache/ artifacts: expire_in: 60 days paths: - ${BUILD_NAME}_artifacts/ allow_failure: true build_fedora_38: extends: .build-fedora-template image: registry.fedoraproject.org/fedora:38 variables: BUILD_NAME: fc-38 only: - schedules - tags build_fedora_rawhide: extends: .build-fedora-template image: registry.fedoraproject.org/fedora:rawhide variables: BUILD_NAME: fc-rawhide only: - schedules when: manual #------------------------------------------------------------------------------- # Exotic builds #------------------------------------------------------------------------------- .build_exotic-template: &build_exotic-template_definition stage: build:rpm variables: PKG_MGR: dnf CMAKE_CMD: cmake3 script: - export DIST=$(rpm --eval '%{rhel}') - ${PKG_MGR} install -y git ccache tar sudo which tar gzip moreutils - git submodule sync --recursive && git submodule update --init -f --recursive - mkdir build; cd build - ${CMAKE_CMD} .. 
-DPACKAGEONLY=1 ${CMAKE_OPTIONS} -Wno-dev - make srpm; cd ..; - | if [[ "$RPMBUILD_OPTIONS" == *asan* ]]; then echo -e "[eos-asan-depend]\nname=EOS dependencies\nbaseurl=http://storage-ci.web.cern.ch/storage-ci/eos/${CODENAME}-depend/el-${DIST}-asan/x86_64/\ngpgcheck=0\nenabled=1\npriority=2\n" > /etc/yum.repos.d/eos-depend.repo; # Install the asan enabled dependencies ${PKG_MGR} remove -y eos-xrootd eos-folly eos-grpc eos-rocksdb || true; elif [[ "$RPMBUILD_OPTIONS" == *tsan* ]]; then echo -e "[eos-tsan-depend]\nname=EOS dependencies\nbaseurl=http://storage-ci.web.cern.ch/storage-ci/eos/${CODENAME}-depend/el-${DIST}-tsan/x86_64/\ngpgcheck=0\nenabled=1\npriority=2\n" > /etc/yum.repos.d/eos-depend.repo; # Install the tsan enabled dependencies ${PKG_MGR} remove -y eos-xrootd eos-folly eos-grpc eos-rocksdb || true; else echo -e "[eos-depend]\nname=EOS dependencies\nbaseurl=http://storage-ci.web.cern.ch/storage-ci/eos/${CODENAME}-depend/el-${DIST}/x86_64/\ngpgcheck=0\nenabled=1\npriority=2\n" > /etc/yum.repos.d/eos-depend.repo; fi - | if [[ ${PKG_MGR} == "yum" ]]; then ${PKG_MGR}-builddep --nogpgcheck --setopt="cern*.exclude=xrootd*" -y build/SRPMS/* else ${PKG_MGR} install -y dnf-plugins-core ${PKG_MGR} builddep --nogpgcheck --setopt="cern*.exclude=xrootd*" -y build/SRPMS/* fi - mkdir -p ${BUILD_NAME}_artifacts - if [[ -n "$CI_COMMIT_TAG" ]]; then export CCACHE_DISABLE=1; else source gitlab-ci/setup_ccache.sh; fi - rpmbuild --rebuild ${RPMBUILD_OPTIONS} --define "_rpmdir build/RPMS/" --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" build/SRPMS/* | ts - ccache -s - if [[ -n "$CI_COMMIT_TAG" ]]; then gpg2 --batch --import $STCI_REPO_KEY; printf "" | setsid rpmsign --define='%_gpg_name stci@cern.ch' --define='%_signature gpg' --addsign build/RPMS/*.rpm; fi - cp -R build/SRPMS/ build/RPMS/ ${BUILD_NAME}_artifacts cache: key: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" paths: - ccache/ artifacts: expire_in: 1 day paths: - ${BUILD_NAME}_artifacts/ 
dependencies: [] allow_failure: true build_el9_asan: image: gitlab-registry.cern.ch/dss/eos/prebuild-el9-${CODENAME} variables: CMAKE_CMD: cmake BUILD_NAME: el-9-asan CMAKE_OPTIONS: "-DASAN=1" RPMBUILD_OPTIONS: "--with server --with asan" CXXFLAGS: "-Wno-parentheses" # Avoid boost header compilation errors before_script: - ${PKG_MGR} install -y epel-release libasan cmake gcc gcc-c++ rpmdevtools extends: .build_exotic-template when: manual build_client_el9_tsan: image: gitlab-registry.cern.ch/dss/eos/prebuild-el9-${CODENAME} variables: CMAKE_CMD: cmake BUILD_NAME: el-9-tsan CMAKE_OPTIONS: "-DTSAN=1" RPMBUILD_OPTIONS: "--with tsan" CXXFLAGS: "-Wno-parentheses" # Avoid boost header compilation errors before_script: - ${PKG_MGR} install -y epel-release libtsan cmake gcc gcc-c++ which rpmdevtools extends: .build_exotic-template when: manual build_el9_clang: image: gitlab-registry.cern.ch/dss/eos/prebuild-el9-${CODENAME} variables: BUILD_NAME: el-9-clang CMAKE_OPTIONS: "-DCLANG_BUILD=1" RPMBUILD_OPTIONS: "--with clang --with server" extends: .build_exotic-template only: - schedules - triggers # @note Please contact CTA team / jleduc if you want to change this job build_cc7_opt_xrootd: image: gitlab-registry.cern.ch/dss/eos/prebuild-cc7-${CODENAME} variables: PKG_MGR: yum BUILD_NAME: cc7_opt_xrootd CMAKE_OPTIONS: "-DEOS_XROOTD=1" RPMBUILD_OPTIONS: "--with eos_xrootd_rh" before_script: - sed -i "s/pgm \/usr\/bin\/xrdcp/pgm \/bin\/true/g" misc/etc/xrd.cf.fst except: - tags extends: .build_exotic-template when: manual build_el9_coverage: image: gitlab-registry.cern.ch/dss/eos/prebuild-el9-${CODENAME} variables: BUILD_NAME: el9_coverage RPMBUILD_OPTIONS: "--with coverage" only: variables: - $COVERAGE_SCHEDULE extends: .build_exotic-template #------------------------------------------------------------------------------- # Build docker images #------------------------------------------------------------------------------- .build_dockerimage-template: stage: build:dockerimage 
image: name: gcr.io/kaniko-project/executor:debug entrypoint: [""] variables: EXTRA_TAG: "" script: # @note keep $CACHE orthogonal to $IMAGE_TAG, do not join the "if"s - if [[ -n "$CI_COMMIT_TAG" ]] || [[ "x$CI_PIPELINE_SOURCE" == "xschedule" ]]; then CACHE="false"; else CACHE="true"; fi - if [[ -n "$CI_COMMIT_TAG" ]]; then IMAGE_TAG="$CI_COMMIT_TAG${OS_TAG}${EXTRA_TAG}"; else IMAGE_TAG="$CI_COMMIT_SHORT_SHA${OS_TAG}${EXTRA_TAG}"; fi - IMAGE_REPO="gitlab-registry.cern.ch/dss/eos/eos-ci" - DESTINATION="${IMAGE_REPO}:${IMAGE_TAG}" - echo "CACHE=$CACHE - DESTINATION=$DESTINATION" - echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n $CI_REGISTRY_USER:$CI_REGISTRY_PASSWORD | base64)\"}}}" > /kaniko/.docker/config.json - /kaniko/executor --cache=$CACHE --destination $DESTINATION --dockerfile $DOCKERFILE --context $CI_PROJECT_DIR --build-arg=EOS_CODENAME="${CODENAME}" --compressed-caching=false --use-new-run retry: 1 el9_docker_image: extends: .build_dockerimage-template variables: DOCKERFILE: eos-docker/Dockerfile_el9 OS_TAG: ".el9" needs: - job: clone_docker - job: build_el9 el10_docker_image: extends: .build_dockerimage-template variables: DOCKERFILE: eos-docker/Dockerfile_el10 OS_TAG: ".el10" needs: - job: clone_docker - job: build_el10 only: - schedules - tags el9_asan_docker_image: extends: .build_dockerimage-template variables: DOCKERFILE: eos-docker/Dockerfile_el9_asan EXTRA_TAG: "_asan" OS_TAG: ".el9" needs: - job: clone_docker - job: build_el9_asan when: manual allow_failure: true .el9_coverage_docker_image: extends: .build_dockerimage-template variables: DOCKERFILE: eos-docker/Dockerfile_coverage EXTRA_TAG: "_coverage" OS_TAG: ".el9" needs: - job: clone_docker - job: build_el9_coverage only: variables: - $COVERAGE_SCHEDULE allow_failure: true #------------------------------------------------------------------------------- # Code quality, from codeclimate plugins - disabled #------------------------------------------------------------------------------- # 
.code_quality: # artifacts: # paths: [gl-code-quality-report.json] # rules: # - if: '$CI_PIPELINE_SOURCE == "schedule"' # allow_failure: true # .code_quality_html: # extends: code_quality # variables: # REPORT_FORMAT: html # artifacts: # paths: [gl-code-quality-report.html] #------------------------------------------------------------------------------- # Dock8rnetes testing framework (exec_cmd wraps both docker and k8s!) #------------------------------------------------------------------------------- .dock8s_before_script_template: &dock8s_before_script_template stage: test before_script: - case $CI_JOB_NAME in "k8s"* ) source ./gitlab-ci/before_script_k8s_test.sh; source ./gitlab-ci/utilities_func_for_tests.sh --type k8s $K8S_NAMESPACE ;; "dock"* ) source ./gitlab-ci/before_script_docker_test.sh; source ./gitlab-ci/utilities_func_for_tests.sh --type docker; ;; esac variables: OS_TAG: ".el9" .dock8s_after_script_template: &dock8s_after_script_template after_script: - case $CI_JOB_NAME in "k8s"* ) source ./gitlab-ci/after_script_k8s_test.sh ;; "dock"* ) source ./gitlab-ci/after_script_docker_test.sh ;; esac .dock8s_system_test_template: extends: - .dock8s_before_script_template - .dock8s_after_script_template script: - date - exec_cmd eos-mgm1 'eos ns mutex --toggleorder' - exec_cmd eos-mgm1 'eos-instance-test-ci' - date - exec_cmd eos-mgm1 'eos-unit-tests-with-instance -n root://localhost//eos/dockertest/' - exec_cmd eos-mgm1 'grep "RWMutex. 
Order Checking Error in thread" /var/log/eos/mgm/xrdlog.mgm && exit 1 || exit 0' - date - cp_to_local_cmd eos-cli1:/usr/sbin/eos-test-utils ./eos-test-utils; chmod +x eos-test-utils - cp_to_local_cmd eos-cli1:/usr/sbin/eos-fst-close-test ./eos-fst-close-test; chmod +x eos-fst-close-test - case $CI_JOB_NAME in "k8s"* ) export EOS_MGM_URL="root://eos-mgm1.eos-mgm1.$K8S_NAMESPACE.svc.cluster.local"; ./eos-fst-close-test --mgm ${EOS_MGM_URL} --type k8s $K8S_NAMESPACE ;; "dock"* ) ./eos-fst-close-test --type docker ;; esac - date artifacts: when: on_failure expire_in: 3 days paths: - eos-logs-${CI_JOB_ID}/ .dock8s_convert_fsck_recycle_template: extends: - .dock8s_before_script_template - .dock8s_after_script_template script: - cp_to_local_cmd eos-cli1:/usr/sbin/eos-test-utils ./eos-test-utils; chmod +x eos-test-utils - cp_to_local_cmd eos-cli1:/usr/sbin/eos-convert-test ./eos-convert-test; chmod +x eos-convert-test - cp_to_local_cmd eos-cli1:/usr/sbin/eos-fsck-test ./eos-fsck-test; chmod +x eos-fsck-test - cp_to_local_cmd eos-cli1:/usr/sbin/eos-recycle-test ./eos-recycle-test; chmod +x eos-recycle-test - case $CI_JOB_NAME in "k8s"* ) ./eos-convert-test --type k8s $K8S_NAMESPACE; ./eos-fsck-test --max-delay 600 --type k8s $K8S_NAMESPACE; ./eos-recycle-test --type k8s $K8S_NAMESPACE;; "dock"* ) ./eos-convert-test --type docker; ./eos-fsck-test --max-delay 600 --type docker; ./eos-recycle-test --type docker;; esac - rm -rf eos-test-utils - rm -rf eos-convert-test - rm -rf eos-fsck-test - rm -rf eos-recycle-test artifacts: when: on_failure expire_in: 3 days paths: - eos-logs-${CI_JOB_ID}/ .dock8s_rtb_clone_template: extends: - .dock8s_before_script_template - .dock8s_after_script_template script: # prepare mountpoints - exec_cmd eos-cli1 'atd; at now <<< "mkdir -p /eos1/ && mount -t fuse eosxd -ofsname=mount-1 /eos1/; mkdir -p /eos2/ && mount -t fuse eosxd -ofsname=mount-2 /eos2/;"' - exec_cmd eos-cli1 'count=0; while [[ $count -le 10 ]] && ( [[ ! 
-d /eos1/dockertest/ ]] || [[ ! -d /eos2/dockertest/ ]] ); do echo "Wait for mount... $count"; (( count++ )); sleep 1; done;' # download tests repo - exec_cmd eos-cli1 'git clone https://gitlab.cern.ch/dss/eosclient-tests.git' - exec_cmd eos-cli1 'cd /eosclient-tests && pip install -r requirements.txt' # ubuntu releases do not support 'clone' yet, skip its test - case $CI_JOB_NAME in "ub_focal"* | "ub_jammy"* ) ;; * ) exec_cmd eos-cli1 'cd /eosclient-tests; clone_tests/clone_test.sh prepare; rc=$?; exit $rc' ;; esac artifacts: when: on_failure expire_in: 3 days paths: - eos-logs-${CI_JOB_ID}/ .dock8s_fusex_test_template: extends: - .dock8s_before_script_template - .dock8s_after_script_template script: # prepare mountpoints - exec_cmd eos-cli1 'atd; at now <<< "mkdir -p /eos1/ && mount -t fuse eosxd -ofsname=mount-1 /eos1/; mkdir -p /eos2/ && mount -t fuse eosxd -ofsname=mount-2 /eos2/;"' - exec_cmd eos-cli1 'count=0; while [[ $count -le 10 ]] && ( [[ ! -d /eos1/dockertest/ ]] || [[ ! -d /eos2/dockertest/ ]] ); do echo "Wait for mount... $count"; (( count++ )); sleep 1; done;' # fusex functional bindings - exec_cmd eos-cli1 'atd; at now <<< "mkdir -p /eosfunctionaltest/ && mount -t fuse eosxd -ofsname=eosdockertest /eosfunctionaltest/;"' - exec_cmd eos-cli1 'count=0; while [[ $count -le 10 ]] && [[ ! -d /eosfunctionaltest/dockertest/ ]] ; do echo "Wait for mount... $count"; (( count++ )); sleep 1; done;' - exec_cmd eos-cli1 'su eos-user -c "mkdir -m 700 -p /eosfunctionaltest/dockertest/credentialtest/ && cd /eosfunctionaltest/dockertest/credentialtest/"' - exec_cmd eos-cli1 'su eos-user -c "eos-test-credential-bindings /eosfunctionaltest/dockertest/credentialtest/"' # fusex benchmark - exec_cmd eos-mgm1 'eos ns mutex --toggleorder' - exec_cmd eos-cli1 'su eos-user -c "mkdir -p /eos1/dockertest/fusex_tests/ && cd /eos1/dockertest/fusex_tests/ && fusex-benchmark"' - exec_cmd eos-mgm1 'grep "RWMutex. 
Order Checking Error in thread" /var/log/eos/mgm/xrdlog.mgm && exit 1 || exit 0' # download tests repo - exec_cmd eos-cli1 'git clone https://gitlab.cern.ch/dss/eosclient-tests.git' - exec_cmd eos-cli1 'cd /eosclient-tests && pip install -r requirements.txt' # run the tests # @todo(esindril): run "all" tests in schedule mode once these are properly supported # if [[ "$CI_PIPELINE_SOURCE" == "schedule" ]]; # then # exec_cmd eos-mgm1 'eos vid add gateway "eos-cli1.eos-cli1.${K8S_NAMESPACE}.svc.cluster.local" unix'; # exec_cmd eos-cli1 'env EOS_FUSE_NO_ROOT_SQUASH=1 python3 /eosclient-tests/run.py --workdir="/eos1/dockertest /eos2/dockertest" ci'; # fi # until then just run the "ci" tests - exec_cmd eos-cli1 'cd eosclient-tests; for n in prepare/*.sh; do /bin/bash $n prepare; done' - exec_cmd eos-cli1 'su eos-user -c "python3 /eosclient-tests/run.py --workdir=\"/eos1/dockertest /eos2/dockertest\" ci"' - exec_cmd eos-cli1 'cd eosclient-tests; for n in prepare/*.sh; do /bin/bash $n cleanup; done' # fusex test SAMBA gateways authentication settings # this will run on the client pod - exec_cmd eos-mgm1 'eos vid enable sss' - exec_cmd eos-mgm1 'eos vid enable unix' - CLI_POD_HOSTNAME="$(exec_cmd eos-cli1 'hostname -f')" - echo ${CLI_POD_HOSTNAME} - exec_cmd eos-mgm1 "eos vid add gateway ${CLI_POD_HOSTNAME} unix" - exec_cmd eos-cli1 'eos-fusex-functional-test --samba' artifacts: when: on_failure expire_in: 3 days paths: - eos-logs-${CI_JOB_ID}/ .dock8s_cbox_test_template: extends: - .dock8s_before_script_template - .dock8s_after_script_template script: # enable converter and prepare eoshome folder, cernbox alike - exec_cmd eos-mgm1 'eos convert config set status=on' - exec_cmd eos-mgm1 './eos_create_userhome.sh eos-user' # prepare mountpoints - exec_cmd eos-cli1 'atd; at now <<< "mkdir -p /eos/ && mount -t fuse eosxd -ofsname=eosdockertest /eos/"' - exec_cmd eos-cli1 'count=0; while [[ $count -le 10 ]] && ( [[ ! -d /eos/ ]] ); do echo "Wait for mount... 
$count"; (( count++ )); sleep 1; done;' # set krb5 ticket and download tests repo @note the 'export KRB5CCNAME to FILE: type' is a spooky trick, can be made nicer. - exec_cmd eos-cli1 'echo -e "export KRB5CCNAME=FILE:/tmp/krb5cc_$(id -u eos-user)" >> ~/.bashrc' - exec_cmd eos-cli1 'su eos-user -c "kinit eos-user@TEST.EOS -k -t /home/eos-user/eos-user.keytab"' - exec_cmd eos-cli1 'su eos-user -c "git clone https://gitlab.cern.ch/dss/eosclient-tests.git /eos/user/e/eos-user/eosclient-tests"' - exec_cmd eos-cli1 'su eos-user -c "cd /eos/user/e/eos-user/eosclient-tests && pip install -r requirements.txt"' # launch the tests - exec_cmd eos-cli1 'su eos-user -c "cd /eos/user/e/eos-user && python3 ./eosclient-tests/run.py --workdir=/eos/user/e/eos-user ci-eosfuse_release"' - exec_cmd eos-cli1 'su eos-user -c "cd /eos/user/e/eos-user && python3 ./eosclient-tests/run.py --workdir=/eos/user/e/eos-user regression"' artifacts: when: on_failure expire_in: 3 days paths: - eos-logs-${CI_JOB_ID}/ .dock8s_reva_test_template: extends: - .dock8s_before_script_template - .dock8s_after_script_template script: # enable converter and prepare eoshome folder, cernbox alike - exec_cmd eos-mgm1 'eos convert config set status=on' - exec_cmd eos-mgm1 './eos_create_userhome.sh eos-user' # prepare mountpoints - exec_cmd eos-cli1 'atd; at now <<< "mkdir -p /eos/ && mount -t fuse eosxd -ofsname=eosdockertest /eos/"' - exec_cmd eos-cli1 'count=0; while [[ $count -le 10 ]] && ( [[ ! -d /eos/ ]] ); do echo "Wait for mount... 
$count"; (( count++ )); sleep 1; done;' # install dependencies - exec_cmd eos-mgm1 'yum -y install nodejs npm git make tar' # Install go - exec_cmd eos-mgm1 "export PATH=\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$PATH\" && /usr/bin/wget https://go.dev/dl/go1.25.1.linux-amd64.tar.gz && /usr/bin/tar -C /usr/local -xzf go1.25.1.linux-amd64.tar.gz && ln -s /usr/local/go/bin/go /usr/bin/go && ln -s /usr/local/go/bin/gofmt /usr/bin/gofmt || echo Failed to install golang" - exec_cmd eos-mgm1 'npm install -g @intuit/judo' # Build reva from latest tag - exec_cmd eos-mgm1 'git clone https://github.com/cs3org/reva.git' - | exec_cmd eos-mgm1 'export PATH="$(/usr/bin/go env GOPATH)/bin:$PATH"; cd reva; git fetch --tags ; export latest_tag=$(git tag --sort=-version:refname | grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$" | head -1) ; echo "Building reva tag: $latest_tag" ; git checkout "$latest_tag"; make reva; make cernbox-revad; mkdir -p ../tmp' # Put users.json and groups.json in the right place - exec_cmd eos-mgm1 'cp reva/tests/integration/reva-cli/config/users.json tmp/users.json' - exec_cmd eos-mgm1 'cp reva/tests/integration/reva-cli/config/groups.json tmp/groups.json' # Put eos path as basepath for tests: - | exec_cmd eos-mgm1 \ "find reva/tests/integration/reva-cli -type f -name '*.yaml' -exec sed -i 's|^[[:space:]]*BASEDIR: \"/localfs\"$| BASEDIR: \"/eos/test\"|' {} +" # Prepare mgm (add users, set vid mapping, create directory) - exec_cmd eos-mgm1 "groupadd -g 123 myusers" - exec_cmd eos-mgm1 "useradd -u 1255 -g myusers testuser" - exec_cmd eos-mgm1 "useradd -u 1256 -g myusers testreceivinguser" - exec_cmd eos-mgm1 "eos mkdir /eos/test" - exec_cmd eos-mgm1 "eos chown 1255:123 /eos/test" - exec_cmd eos-mgm1 "eos acl --sys u:1255=rwx /eos" - exec_cmd eos-mgm1 "eos recycle config --add-bin /eos/" - exec_cmd eos-mgm1 "eos attr set sys.versioning=\"10\" /eos/test" - exec_cmd eos-mgm1 "eos attr set sys.forced.atomic=\"1\" /eos/test" - exec_cmd eos-mgm1 "eos attr 
set sys.allow.oc.sync=\"1\" /eos/test" - exec_cmd eos-mgm1 "eos attr ls /eos/test" - exec_cmd eos-mgm1 "eos vid add gateway \"127.0.0.1\" grpc" - exec_cmd eos-mgm1 "eos vid add gateway \"[:1]\" grpc" - exec_cmd eos-mgm1 "eos vid add gateway \"[::1]\" grpc" - exec_cmd eos-mgm1 "eos vid add gateway \"127.0.0.1\" https" - exec_cmd eos-mgm1 "eos vid add gateway \"[:1]\" https" - exec_cmd eos-mgm1 "eos vid add gateway \"[::1]\" https" - exec_cmd eos-mgm1 "eos vid set map -grpc key:auth_key vuid:11 vgid:11" - exec_cmd eos-mgm1 "eos vid set map -https key:auth_key vuid:11 vgid:11" - exec_cmd eos-mgm1 "eos vid set membership 11 +sudo" - exec_cmd eos-mgm1 "eos vid set membership 11 -uids 3" - exec_cmd eos-mgm1 "eos vid set membership 11 -gids 4" - exec_cmd eos-mgm1 "eos access allow group myusers" - echo ${MGM_POD_HOSTNAME} # We connect over https, and the certificate is only valid for the hostname, so replace localhost with the MGM's hostname - exec_cmd eos-mgm1 'sed -i "s/^\(master_url = \"https:\/\/\)localhost\(:[0-9][0-9]*\"\)/\1$(hostname -f)\2/" reva/tests/integration/reva-cli/config/revad-eos.toml' # Start revad - exec_cmd eos-mgm1 './reva/cmd/revad/revad -c reva/tests/integration/reva-cli/config/revad-eos.toml revad.log 2>&1 & echo $! 
> revad.pid' # Tests and debug output - exec_cmd eos-mgm1 "cd reva; make test-reva-cli" - exec_cmd eos-mgm1 "eos vid ls" - exec_cmd eos-mgm1 "eos access ls" - exec_cmd eos-mgm1 "cat revad.log" artifacts: when: on_failure expire_in: 3 days paths: - eos-logs-${CI_JOB_ID}/ allow_failure: true .dock8s_flamegraph_test_template: extends: - .dock8s_before_script_template - .dock8s_after_script_template script: - date - echo 0 > /proc/sys/kernel/perf_event_paranoid; cat /proc/sys/kernel/perf_event_paranoid - echo 0 > /proc/sys/kernel/kptr_restrict; cat /proc/sys/kernel/kptr_restrict - exec_cmd eos-mgm1 "mkdir eos-flamegraph-data; cd eos-flamegraph-data; /usr/sbin/eos-make-flamegraph" artifacts: expire_in: 1 days paths: - eos-logs-${CI_JOB_ID}/ .unit_test_template: &unit_test_template_definition stage: test variables: OS_TAG: ".el9" script: # generic unit tests - eos-unit-tests - eos-unit-tests-fst - eos-fusex-tests - pip3 install pytest; python3 -m pytest /usr/sbin/test-eos-iam-mapfile.py # namespace specific unit tests - export EOS_QUARKDB_HOSTPORT=localhost:7777 - quarkdb-create --path /var/quarkdb/node-0 - chown -R daemon:daemon /var/quarkdb/node-0 - xrootd -n qdb -c /etc/xrd.cf.quarkdb -l /var/log/eos/xrdlog.qdb -b -Rdaemon - eos-ns-quarkdb-tests - cp /usr/sbin/qclient-tests . 
&& GTEST_DEATH_TEST_USE_FORK=1 ./qclient-tests needs: - job: el9_docker_image artifacts: false retry: 1 tags: - docker_node - dock unit_test:tag: extends: .unit_test_template image: name: gitlab-registry.cern.ch/dss/eos/eos-ci:${CI_COMMIT_TAG}${OS_TAG} entrypoint: ["/bin/bash", "-c"] only: - tags unit_test: extends: .unit_test_template image: name: gitlab-registry.cern.ch/dss/eos/eos-ci:${CI_COMMIT_SHORT_SHA}${OS_TAG} entrypoint: ["/bin/bash", "-c"] except: - tags unit_test_asan: extends: .unit_test_template image: name: gitlab-registry.cern.ch/dss/eos/eos-ci:${CI_COMMIT_SHORT_SHA}${OS_TAG}${EXTRA_TAG} entrypoint: ["/bin/bash", "-c"] variables: LSAN_OPTIONS: "suppressions=/var/eos/test/LeakSanitizer.supp" # Suppress known memory leaks. For the generic tests ASAN_OPTIONS: "fast_unwind_on_malloc=0" # Avoid indirect leaks from linked dependencies. For the namespace tests EXTRA_TAG: "_asan" needs: - job: el9_asan_docker_image artifacts: false when: manual allow_failure: true #------------------------------------------------------------------------------- # RPM publishing #------------------------------------------------------------------------------- .publish_koji_template: &publish_koji_template_definition stage: publish image: gitlab-registry.cern.ch/linuxsupport/rpmci/kojicli script: - yum install --nogpg -y sssd-client - kinit stci@CERN.CH -k -t /stci.krb5/stci.keytab # KOJI_SCRATCH will be set for branches and empty for tags - koji build ${KOJI_SCRATCH} ${TARGET} ${BUILD_NAME}_artifacts/SRPMS/*.src.rpm tags: - docker_node - publish when: manual publish_koji_al8: <<: *publish_koji_template_definition variables: TARGET: "eos8al" BUILD_NAME: "el-8" only: - schedules - tags needs: - job: build_el8 artifacts: true publish_koji_al9: <<: *publish_koji_template_definition variables: TARGET: "eos9al" BUILD_NAME: "el-9" needs: - job: build_el9 artifacts: true publish_koji_al10: <<: *publish_koji_template_definition variables: TARGET: "eos10al" BUILD_NAME: "el-10" only: - 
schedules - tags needs: - job: build_el10 artifacts: true publish_koji_rh-8: <<: *publish_koji_template_definition variables: TARGET: "eos8el" BUILD_NAME: "rh-8" only: - schedules - tags needs: - job: rh-8 publish_koji_rh-9: <<: *publish_koji_template_definition variables: TARGET: "eos9el" BUILD_NAME: "rh-9" only: - schedules - tags needs: - job: rh-9 publish_koji_rh-10: <<: *publish_koji_template_definition variables: TARGET: "eos10el" BUILD_NAME: "rh-10" only: - schedules - tags needs: - job: rh-10 email_notification: stage: publish image: gitlab-registry.cern.ch/linuxsupport/alma9-base variables: ENV: production TO_ADDRS: project-eos-commits@cern.ch,lxbatch-experts@cern.ch script: - dnf install -y git python pip - git clone https://token:$EOS_REPO_MAILSERVICE_TOKEN@gitlab.cern.ch/eos/eos-mailservices-code-samples.git - cd eos-mailservices-code-samples/Python/oauth2-samples; pip install --no-input -r requirements.txt - export EOS_VERSION=$CI_COMMIT_TAG - python -m oauth2_smtp needs: - job: publish_koji_al9 only: - tags # This job uses CI_JOB_TOKEN to trigger a pipeline in the CTA project. # This means that whoever triggers this job, must have the rights to start a pipeline in CTA. 
notify_cta_project: stage: publish image: gitlab-registry.cern.ch/linuxsupport/alma9-base needs: - job: el9_docker_image artifacts: false variables: CTA_BRANCH: main script: - dnf install --nogpg -y curl gawk jq - CTA_PROJECT_ID="139306" - CTA_PROJECT_API="https://gitlab.cern.ch/api/v4/projects/$CTA_PROJECT_ID" - TRIGGER_URL="$CTA_PROJECT_API/trigger/pipeline" # Construct EOS versions - | if [[ -z "$CI_COMMIT_TAG" ]]; then EOS_IMAGE_TAG="$CI_COMMIT_SHORT_SHA.el9"; else EOS_IMAGE_TAG="$CI_COMMIT_TAG.el9"; EOS_VERSION=$(dnf -q --repofrompath=temprepo,https://storage-ci.web.cern.ch/storage-ci/eos/diopside/tag/testing/el-9/x86_64/ --repo=temprepo repoquery --qf "%{version}-%{release}\n" eos-client | grep "${CI_COMMIT_TAG}" | tail -1); if [[ -z ${EOS_VERSION} ]]; then echo "ERROR: Could not find EOS RPMs for ${CI_COMMIT_TAG} in storage-ci.web.cern.ch/storage-ci/eos/diopside/tag/testing/el-9/x86_64/" exit 1 fi fi # Extract XRootD version from eos.spec.in - xrd_min=$(grep '^%define xrootd_version_min' eos.spec.in | awk '{print $3}') - XROOTD_VERSION=$(dnf -q --repofrompath=temprepo,https://xrootd.web.cern.ch/repo/testing/el9/x86_64/ --repo=temprepo repoquery --qf "%{epoch}:%{version}-%{release}\n" xrootd | grep "${xrd_min}" | tail -1) # Get the latest stable CTA version - LATEST_CTA_VERSION=$(dnf -q --repofrompath=temprepo,https://cta-public-repo.web.cern.ch/stable/cta-5/el9/cta/x86_64/ --repo=temprepo repoquery --latest-limit=1 --qf "%{version}-%{release}" cta-taped) # Against an existing CTA tag, we can only trigger a pipeline where a different EOS image is used - echo "Triggering pipeline against CTA version $LATEST_CTA_VERSION with EOS image tag $EOS_IMAGE_TAG" - curl -X POST -F token=$CI_JOB_TOKEN -F ref=$CTA_BRANCH -F "variables[CUSTOM_EOS_IMAGE_TAG]=$EOS_IMAGE_TAG" -F "variables[CUSTOM_CTA_VERSION]=$LATEST_CTA_VERSION" -F "variables[PIPELINE_TYPE]=REGR_AGAINST_CTA_VERSION" $TRIGGER_URL # Against the CTA main branch, we can also test the client EOS and XRootD 
versions if we are on a (EOS) tag # If we are not on a (EOS) tag, the EOS RPMs are not accessible by the CTA pipeline, so we only test against the image - | if [[ -z "$CI_COMMIT_TAG" ]]; then echo "Triggering pipeline against CTA main branch" echo "Using EOS image tag $EOS_IMAGE_TAG and XRootD version $XROOTD_VERSION" curl -X POST \ -F token=$CI_JOB_TOKEN \ -F ref=$CTA_BRANCH \ -F "variables[CUSTOM_EOS_IMAGE_TAG]=$EOS_IMAGE_TAG" \ -F "variables[CUSTOM_XROOTD_VERSION]=$XROOTD_VERSION" \ -F "variables[PIPELINE_TYPE]=REGR_AGAINST_CTA_MAIN" \ $TRIGGER_URL else echo "Triggering pipeline against CTA main branch" echo "Using EOS image tag $EOS_IMAGE_TAG, EOS client version $EOS_VERSION and XRootD version $XROOTD_VERSION" curl -X POST \ -F token=$CI_JOB_TOKEN \ -F ref=$CTA_BRANCH \ -F "variables[CUSTOM_EOS_IMAGE_TAG]=$EOS_IMAGE_TAG" \ -F "variables[CUSTOM_EOS_VERSION]=$EOS_VERSION" \ -F "variables[CUSTOM_XROOTD_VERSION]=$XROOTD_VERSION" \ -F "variables[PIPELINE_TYPE]=REGR_AGAINST_CTA_MAIN" \ $TRIGGER_URL fi rules: - if: '$CI_COMMIT_TAG' when: on_success - when: manual allow_failure: true rpm_commit_artifacts: stage: publish image: gitlab-registry.cern.ch/linuxsupport/alma9-base needs: - job: build_el8 artifacts: true optional: true - job: build_el9 artifacts: true - job: build_el10 artifacts: true optional: true - job: build_el9_arm64 artifacts: true optional: true - job: build_fedora_38 artifacts: true optional: true script: - dnf install --nogpg -y sudo sssd-client createrepo - if [[ -n "$CI_COMMIT_TAG" ]]; then echo "This only works for commits"; exit 0; else BUILD_TYPE="commit"; fi - sudo -u stci -H ./gitlab-ci/store_artifacts.sh ${CODENAME} ${BUILD_TYPE} /eos/project/s/storage-ci/www/eos tags: - docker_node - publish except: - tags allow_failure: true when: manual rpm_testing_artifacts: stage: publish image: gitlab-registry.cern.ch/linuxsupport/alma9-base script: - dnf install --nogpg -y sudo sssd-client createrepo - if [[ -n "$CI_COMMIT_TAG" ]]; then 
BUILD_TYPE="tag/testing"; else BUILD_TYPE="commit"; fi - sudo -u stci -H ./gitlab-ci/store_artifacts.sh ${CODENAME} ${BUILD_TYPE} /eos/project/s/storage-ci/www/eos tags: - docker_node - publish only: - master - tags retry: 1 rpm_stable_artifacts: stage: publish image: gitlab-registry.cern.ch/linuxsupport/alma9-base script: - dnf install --nogpg -y sudo sssd-client createrepo - if [[ -n "$CI_COMMIT_TAG" ]]; then BUILD_TYPE="tag"; else echo "This only works for tags"; exit 0; fi - ./gitlab-ci/store_artifacts.sh ${CODENAME} ${BUILD_TYPE} /mnt/eos_repositories/eos - sudo -u stci -H ./gitlab-ci/store_stable_artifacts.sh ${CODENAME} /eos/project/s/storage-ci/www/eos ${CI_COMMIT_TAG} - echo ${CI_COMMIT_TAG} | sudo -u stci tee /eos/project/s/storage-ci/www/eos/${CODENAME}/tag/latest_version tags: - docker_node - publish only: - tags dependencies: [] when: manual #to be run after the rpm publish .publish_dockerimage-template: stage: publish image: name: gcr.io/kaniko-project/executor:debug entrypoint: [""] script: - if [[ -n "$CI_COMMIT_TAG" ]]; then export REPOBRANCH="tag-testing"; export DESTINATION="${IMAGE_REPO}:${CI_COMMIT_TAG}${OS_TAG}"; else export REPOBRANCH="commit"; export DESTINATION="${IMAGE_REPO}:${CI_COMMIT_SHORT_SHA}${OS_TAG}"; fi - echo "{\"auths\":{\"$CI_REGISTRY\":{\"auth\":\"$(echo -n $CI_REGISTRY_USER:$CI_REGISTRY_PASSWORD | base64)\"}}}" > /kaniko/.docker/config.json - /kaniko/executor --cache=false --destination $DESTINATION --dockerfile $DOCKERFILE --context $CI_PROJECT_DIR --build-arg=EOS_CODENAME="${CODENAME}" --build-arg=REPOBRANCH="${REPOBRANCH}" --compressed-caching=false --use-new-run retry: 1 el9_publish_dockerimage_all: extends: .publish_dockerimage-template variables: DOCKERFILE: eos-docker/minimal/el9_minimal.Dockerfile IMAGE_REPO: "gitlab-registry.cern.ch/dss/eos/eos-all" OS_TAG: ".el9" needs: - job: clone_docker - job: build_el9 allow_failure: true when: manual el9_publish_dockerimage_fusex: extends: .publish_dockerimage-template 
variables: DOCKERFILE: eos-docker/minimal/el9_minimal.fusex-only.Dockerfile IMAGE_REPO: "gitlab-registry.cern.ch/dss/eos/eos-fusex" OS_TAG: ".el9" needs: - job: clone_docker - job: build_el9 allow_failure: true when: manual #------------------------------------------------------------------------------- # RPM cleaning #------------------------------------------------------------------------------- clean_rpm_artifacts: stage: clean image: gitlab-registry.cern.ch/linuxsupport/alma9-base script: - dnf install --nogpg -y sssd-client sudo createrepo - sudo -u stci -H ./gitlab-ci/remove_old_artifacts.sh allow_failure: true only: - triggers - schedules tags: - docker_node # get all the namespaces, filter out the "mgmt" ones, delete if older than 30h clean_k8s_cluster: stage: clean image: alpine/k8s:1.18.2 script: - export KUBECONFIG=$K8S_CONFIG - set +o pipefail - kubectl get namespaces --no-headers | grep -v 'default\|kube-node-lease\|kube-public\|kube-system\|magnum-tiller' | awk 'match($3,/(([3-9][0-9]|[1-9][0-9][0-9]+)h|[1-9][0-9]*d)/) {print $1}' | xargs --no-run-if-empty kubectl delete namespaces dependencies: [] allow_failure: true only: - schedules tags: - docker_node - k8s # @todo cleanup helm leftover for failed / hanging tests. May be merged to 'clean_k8s_cluster' clean_helm_cluster: stage: clean image: gitlab-registry.cern.ch/dss/alpine-enhanced:3.13.5 script: - export KUBECONFIG=$K8S_CONFIG - echo "Please, implement me!" 
dependencies: [] allow_failure: true only: - schedules #------------------------------------------------------------------------------- # Manually triggered builds #------------------------------------------------------------------------------- .eos_nginx_build_template: stage: build:manual variables: PKG_MGR: dnf script: - ${PKG_MGR} install --nogpg -y gcc-c++ cmake make rpm-build which git sudo yum-utils createrepo sssd-client - cd nginx - ./makesrpm.sh - | if [[ ${PKG_MGR} == "yum" ]]; then ${PKG_MGR}-builddep -y --nogpgcheck *.src.rpm else ${PKG_MGR} install -y dnf-plugins-core ${PKG_MGR} builddep -y --nogpgcheck *.src.rpm fi - mkdir RPMS - rpmbuild --rebuild --define "_rpmdir RPMS/" --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" *.src.rpm - STORAGE_PATH=/eos/project/s/storage-ci/www/eos/${CODENAME}-depend/${BUILD_NAME}/x86_64 - sudo -u stci -H mkdir -p $STORAGE_PATH - sudo -u stci -H cp -f RPMS/*.rpm $STORAGE_PATH - sudo -u stci -H createrepo --update -q $STORAGE_PATH tags: - docker_node when: manual eos_nginx_el-8: extends: .eos_nginx_build_template image: gitlab-registry.cern.ch/linuxsupport/alma8-base variables: BUILD_NAME: el-8 eos_nginx_el-9: extends: .eos_nginx_build_template image: gitlab-registry.cern.ch/linuxsupport/alma9-base variables: BUILD_NAME: el-9 #------------------------------------------------------------------------------- # ALICE ApMon builds #------------------------------------------------------------------------------- .build-apmon-template: &build-apmon-template-definition stage: build:manual variables: PKG_MGR: dnf script: - ${PKG_MGR} install --nogpg -y gcc-c++ make rpm-build which git sssd-client sudo createrepo rsync tar gawk - cd ApMon; ./maketar.sh - rpmbuild --define "_source_filedigest_algorithm md5" --define "_binary_filedigest_algorithm md5" --define "_topdir ./rpmbuild" -ts eos-apmon-*.tar.gz - | if [[ ${PKG_MGR} == "yum" ]]; then ${PKG_MGR}-builddep -y --nogpgcheck 
rpmbuild/SRPMS/eos-apmon-*.src.rpm else ${PKG_MGR} install -y dnf-plugins-core ${PKG_MGR} builddep -y --nogpgcheck rpmbuild/SRPMS/eos-apmon-*.src.rpm fi - rpmbuild --rebuild --define "_rpmdir rpmbuild/RPMS/" --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" rpmbuild/SRPMS/eos-apmon-*.src.rpm - sudo -u stci -H mkdir -p ${STORAGE_PATH} - sudo -u stci -H cp -f rpmbuild/RPMS/*.rpm ${STORAGE_PATH} - sudo -u stci -H createrepo --update -q ${STORAGE_PATH} tags: - docker_node when: manual eos_apmon_el-8: image: gitlab-registry.cern.ch/linuxsupport/alma8-base variables: STORAGE_PATH: /eos/project/s/storage-ci/www/eos/${CODENAME}-depend/el-8/x86_64 extends: .build-apmon-template eos_apmon_el-9: image: gitlab-registry.cern.ch/linuxsupport/alma9-base variables: STORAGE_PATH: /eos/project/s/storage-ci/www/eos/${CODENAME}-depend/el-9/x86_64 extends: .build-apmon-template eos_docs: stage: build:manual image: gitlab-registry.cern.ch/linuxsupport/alma9-base script: - yum install --nogpg -y make python3-sphinx sssd-client sudo which git - cd doc - export PYTHONPATH=`pwd`/_themes/ - cd diopside - make html - make html - sudo kinit stci@CERN.CH -k -t /stci.krb5/stci.keytab - sudo -u stci -H rm -rf /eos/project/e/eos/www/docs/diopside/* - sudo -u stci -H cp -R _build/html/* /eos/project/e/eos/www/docs/diopside tags: - docker_node rules: - if: '$CI_COMMIT_TAG' when: on_success allow_failure: true - when: manual allow_failure: true .eos_repopackage: stage: build:manual image: gitlab-registry.cern.ch/linuxsupport/cc7-base script: - yum install --nogpg -y rpm-build sssd-client sudo createrepo - mkdir build - cd build - rpmbuild --bb --define "_rpmdir RPMS/" --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" ../elrepopackage.spec - STORAGE_PATH=/eos/project/s/storage-ci/www/eos/${CODENAME}/tag/el-7/x86_64 - sudo -u stci -H mkdir -p $STORAGE_PATH - sudo -u stci -H cp -f RPMS/*.rpm $STORAGE_PATH - sudo -u stci -H createrepo --update -q 
$STORAGE_PATH tags: - docker_node when: manual pre_commit: stage: pre-commit image: gitlab-registry.cern.ch/linuxsupport/alma10-base needs: [ ] variables: PIP_NO_CACHE_DIR: "1" before_script: - dnf install -y git python python-pip clang-tools-extra - python -m pip install --upgrade pip - pip install pre-commit script: # Run pre-commit against all files starting from a given commit to HEAD # This is done to avoid issues with large diffs when running pre-commit on all files (blocks the CI for ~10 minutes) # When (if) the whole codebase is formatted, we can remove the `--from-ref` and `--to-ref` options to always check all files # pre-commit is run but clang-format will not be applied to all files, just to changed lines. # In this stage it will not run since there are no changes, so clang-format here will do nothing - pre-commit run --from-ref $(git merge-base origin/master HEAD) --to-ref HEAD --all-files # Now we compute the diff and run clang-format on the diff only, as if we had the hook installed during our commits. # This will modify the files in place, so you can copy-paste this command to fix the formatting issues locally and then commit the changes but if you have the hook installed you should never need to! - git diff -U0 $(git merge-base origin/master HEAD) HEAD | python3 utils/clang-format-diff.py -p1 -i # check if the previous command made any changes, if so, fail the job to enforce formatting - git diff --exit-code || (echo "Code is not properly formatted, please run the above command to fix the formatting issues and commit the changes."
&& exit 1) allow_failure: true # we could enable this soon but let's keep it optional for now until people have had time to adapt to the new formatting rules ================================================ FILE: .gitmodules ================================================ [submodule "namespace/ns_quarkdb/qclient"] path = namespace/ns_quarkdb/qclient url = https://gitlab.cern.ch/eos/qclient.git [submodule "mgm/cta_interface"] path = mgm/cta_interface url = https://gitlab.cern.ch/eos/xrootd-ssi-protobuf-interface.git [submodule "common/backward-cpp"] path = common/backward-cpp url = https://github.com/bombela/backward-cpp.git branch = master [submodule "common/xrootd-ssi-protobuf-interface"] path = common/xrootd-ssi-protobuf-interface url = https://:@gitlab.cern.ch:8443/eos/xrootd-ssi-protobuf-interface.git [submodule "unit_tests/googletest"] path = unit_tests/googletest url = https://github.com/google/googletest [submodule "common/grpc-proto"] path = common/grpc-proto url = https://:@gitlab.cern.ch:8443/eos/grpc-proto.git [submodule "common/jwt-cpp"] path = common/jwt-cpp url = https://github.com/Thalhammer/jwt-cpp.git [submodule "quarkdb"] path = quarkdb url = https://gitlab.cern.ch/eos/quarkdb.git [submodule "test/microbenchmarks/benchmark"] path = test/microbenchmarks/benchmark url = https://github.com/google/benchmark [submodule "common/cppzmq"] path = common/cppzmq url = https://github.com/zeromq/cppzmq.git [submodule "proto/eos-protobuf-spec"] path = proto/eos-protobuf-spec url = https://gitlab.cern.ch/eos/eos-protobuf-spec.git [submodule "fst/css_plugin"] path = fst/css_plugin url = https://gitlab.cern.ch/eos/css_plugin.git [submodule "console/parser"] path = console/parser url = https://github.com/CLIUtils/CLI11.git ================================================ FILE: .ignore ================================================ unit_tests/googletest/ common/fmt/ namespace/ns_quarkdb/qclient/src/fmt/ common/sqlite/ man/man1/ .vscode 
================================================ FILE: .mailmap ================================================ Abhishek Lekshmanan Abhishek Lekshmanan Andrea Manzi Andreas Joachim Peters Andreas Joachim Peters Andreas Joachim Peters Andreas Joachim Peters Andreas Joachim Peters Andreas Joachim Peters Andreas Joachim Peters Andreas Joachim Peters Andreas Joachim Peters Andreas Stoeve Andreas Stoeve Andreas Stoeve Andreas Stoeve Andreas Stoeve Andreas Stoeve Andreea Prigoreanu Branko Blagojevic Cristian Contescu Crystal Chua Elvin Alin Sindrilaru Elvin Alin Sindrilaru Elvin Alin Sindrilaru Elvin Alin Sindrilaru Geoffray Adde Geoffray Adde Geoffray Adde Geoffray Adde Herve Rousseau Herve Rousseau Jaroslav Guenther Jaroslav Guenther Jaroslav Guenther Jozsef Makai Jozsef Makai Konstantinos Tsitsimpikos Konstantinos Tsitsimpikos Lukasz Janyst Manuel Reis Manuel Reis Michal Kamil Simon Mr Jenkins Paul Lensing Paul Lensing Paul Lensing Paul Lensing Steven Murray Unknown Unknown Unknown Unknown Unknown Unknown Unknown Unknown Unknown Unknown Unknown Unknown ================================================ FILE: .pre-commit-config.yaml ================================================ fail_fast: false repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v6.0.0 hooks: - id: check-added-large-files - id: check-case-conflict - id: check-merge-conflict - id: check-symlinks - id: check-yaml - id: debug-statements - id: end-of-file-fixer - id: mixed-line-ending - id: requirements-txt-fixer - id: trailing-whitespace # # Disable for now and use clang-format-diff.py instead to only format changed lines. # - repo: https://github.com/pocc/pre-commit-hooks # rev: v1.3.5 # hooks: # - id: clang-format # name: clang-format # types_or: [ c, c++ ] # args: # - -i # stages: [ manual ] - repo: local hooks: - id: clang-format-diff name: clang-format-diff # We pipe 'git diff' to the python script. # -p1 strips the a/ b/ prefixes from the diff so the script finds the files. 
# -i applies the edits in-place. entry: bash -c 'git diff -U0 --no-color --cached | python3 utils/clang-format-diff.py -p1 -i' language: system types_or: [ c, c++ ] pass_filenames: false ================================================ FILE: AUDIT.md ================================================ ## EOS Audit Logging ### Overview EOS implements structured audit logging for successful operations that modify the namespace or file metadata. Audit entries are encoded as JSON (one record per line), written directly into ZSTD-compressed log segments, and rotated every 1 hour by default. A symlink `audit.zstd` always points to the current active segment. This document explains what is logged, the record format, where files are written, rotation behavior, how to parse the logs, and where audit hooks are integrated in the codebase. ### Scope: What gets logged - **Successful namespace-affecting operations by identified users**: - **Files**: CREATE, DELETE, RENAME/MOVE, TRUNCATE, WRITE (commit), UPDATE (open for write without create/truncate) - **Directories**: MKDIR, RMDIR, RENAME/MOVE - **Symlinks**: SYMLINK creation, DELETE - **Metadata**: CHMOD, CHOWN, SET_XATTR, RM_XATTR, SET_ACL - **Optional**: READ and LIST can be enabled later (not default; high volume). - **Excluded**: Failed attempts, internal non-human activities (e.g. purge/version housekeeping). ### Record format (protobuf → JSON) Each audit line is a JSON serialization of the `eos.audit.AuditRecord` protobuf (`proto/Audit.proto`). 
Key elements: - **Common fields** - `timestamp` (int64): seconds since epoch (server time) - `path` (string): absolute path to object; directory paths end with '/' - `operation` (enum): one of CREATE, DELETE, RENAME, WRITE, TRUNCATE, SET_XATTR, RM_XATTR, SET_ACL, CHMOD, CHOWN, MKDIR, RMDIR, SYMLINK, UPDATE - `client_ip` (string), `account` (string) - `auth` (mechanism string + attributes map) - `authorization` (reasons[]) - `trace_id` (string): server trace id - `target` (string): for rename/symlink target path - `uuid` (string): client/session id (empty if placeholder `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`) - `tid` (string): client trace identifier - `app` (string): client application - `svc` (string): emitting service (e.g. "mgm") - **State snapshots** - `before` / `after` (Stat): include `ctime`, `mtime`, `uid`, `gid`, `mode` (uint32), `mode_octal` (string), `size` (uint64), `checksum` (hex string for files) - `attrs` (repeated AttrChange): `{ name, before, after }` for xattr changes (non-system attributes) - **Nanosecond resolution times** - `Stat.ctime_ns` and `Stat.mtime_ns` provide full-resolution strings in the form `seconds.nanoseconds` (e.g. `1730985600.123456789`). - **Source and version metadata** - `src_file`, `src_line`: source file and line where the audit call originated - `version`: software version used when emitting the record Example JSON line (pretty-printed for readability): ```json { "timestamp": 1730985600, "path": "/eos/user/a/alice/data/file.txt", "operation": "WRITE", "client_ip": "192.0.2.10", "account": "alice", "auth": { "mechanism": "krb5", "attributes": {"principal": "alice@EXAMPLE.ORG"} }, "authorization": { "reasons": ["uid-match"] }, "trace_id": "srv-abc123", "uuid": "550e8400-e29b-41d4-a716-446655440000", "tid": "cli-xyz789", "app": "eoscp", "svc": "mgm", "before": { "ctime": 1730980000, "mtime": 1730981000, "uid": 1000, "gid": 1000, "mode": 420, "mode_octal": "0100644", "size": 1024, "checksum": "a1b2..." 
}, "after": { "ctime": 1730980000, "mtime": 1730985600, "ctime_ns": "1730980000.000000000", "mtime_ns": "1730985600.123456789", "uid": 1000, "gid": 1000, "mode": 420, "mode_octal": "0100644", "size": 4096, "checksum": "dead..." }, "src_file": "mgm/FuseServer/Server.cc", "src_line": 2600, "version": "" } ``` ### Log files, rotation, and location - **Location**: `/audit/` where `logdir` is derived from `XRDLOGDIR` (see `mgm/XrdMgmOfsConfigure.cc`). - Directory is created on startup if missing; mode 0755; owned appropriately by the service user. - **Active segment symlink**: `/audit/audit.zstd` points to the current segment file. - **Segments**: Files are ZSTD-compressed; rotated every 1 hour by default. - Override the rotation interval via environment variable: `EOS_AUDIT_ROTATION=` - Filenames include seconds for uniqueness: `audit-YYYYMMDD-HHMMSS.zst` - On rotation, the symlink is atomically updated to the new segment. ### ZSTD stream and flushing - On opening a new segment, the ZSTD frame header is flushed immediately to avoid `zstdcat` errors on empty files. - Each record is written and flushed so small bursts are visible promptly. ### Implementation details - `common/Audit.hh`, `common/Audit.cc` implement the audit writer: - Thread-safe writer with internal locking - Base directory configurable via `setBaseDirectory` or during construction - `audit(const AuditRecord&)` and a convenience overload to populate from `VirtualIdentity`, operation, path, etc. - Automatic rotation based on time; symlink management (`audit.zstd`) - Normalizes placeholder UUID to empty string ### READ and LIST auditing (optional) - **Disabled by default.** Enable only when needed due to potential volume. 
- **Enabling via API** (on `eos::common::Audit`): - `setReadAuditing(true|false)` — enable/disable READ auditing - `setListAuditing(true|false)` — enable/disable directory LIST auditing - **Suffix filter for READ auditing**: - By default, READ auditing applies to common document-style files: `txt, pdf, doc, docx, ppt, pptx, xls, xlsx, odt, ods, odp, rtf, csv, json, xml, yaml, yml, md, html, htm`. - Configure at runtime with `setReadAuditSuffixes({"pdf","docx",...})`. - If the vector contains `"*"`, all files are audited for READ (equivalent to `setReadAuditAll(true)`). - Matching is case-insensitive and based on the file extension of the path being opened. - **Where READ/LIST audits are emitted**: - READ: in `mgm/XrdMgmOfsFile.cc::open` for successful read-only opens (including 0-size files served by MGM) when enabled and suffix matches. - LIST: in `mgm/XrdMgmOfsDirectory.cc::_open` on successful directory opens when enabled. ### Default settings in XrdMgmOfs - The MGM reads environment variables at startup and applies them to the `Audit` instance: - Default mode (`EOS_MGM_AUDIT` unset or `default`): - Audit all modifications (CREATE, DELETE, RENAME, TRUNCATE, WRITE, UPDATE, metadata changes) - Audit READ for the default document-style suffix list - Do not audit LIST ### Per-directory attribute-based auditing (sys.audit) - When `EOS_MGM_AUDIT=attribute`, global auditing is disabled and auditing is enabled per directory via the extended attribute `sys.audit` set on the parent directory (for files) or the directory itself (for LIST). 
- Valid values for `sys.audit` (case-insensitive): - `none` / `no` / `false` / `off`: disable auditing for that directory - `modifications`: enable modifications only (CREATE/DELETE/RENAME/TRUNCATE/WRITE/UPDATE/metadata) - `default`: enable modifications and READ filtered by the default suffix list; LIST remains off - `detail`: enable modifications and READ for all files; LIST remains off - `all`: enable everything including LIST and READ for all files - Evaluation points: - Files: parent directory’s `sys.audit` - LIST: the directory’s own `sys.audit` - Notes: - `EOS_MGM_AUDIT=off` disables auditing completely; `sys.audit` is ignored. - In non-`attribute` modes, global settings control auditing; `sys.audit` is not used to override them. ### Environment configuration - `EOS_MGM_AUDIT` — control overall audit level (parsed in `XrdMgmOfs` and applied during configure): - `none`, `false`, `no`, `off`, or empty: disable all auditing - `default`: audit modifications and READ for default document suffixes (no LIST) - `modifications`: audit only modifications (no LIST, no READ) - `detail`: audit modifications and READ for all files (no LIST) - `all`: audit everything, including LIST and READ for all files - `attribute`: create the audit logger but disable all global auditing; auditing is enabled explicitly via `sys.audit` - `EOS_MGM_AUDIT_READ_SUFFIX` — override the READ suffix filter: - Comma-separated list, case-insensitive (e.g. `pdf,docx,json`) - Use `*` to audit READ for all files - If unset, the built-in default document-style list is used Notes: - Variables are parsed in `XrdMgmOfs` constructor and applied after the `Audit` instance is created in `XrdMgmOfsConfigure.cc`. - Setting `EOS_MGM_AUDIT=attribute` keeps the logger active while relying solely on per-directory `sys.audit` to enable auditing. - Setting `EOS_MGM_AUDIT=off` disables the logger entirely (no auditing). 
### Integration points (where audits are emitted) - Core MGM (`mgm/`): - `XrdMgmOfs.hh`: `std::unique_ptr<eos::common::Audit> mAudit` member - `XrdMgmOfsConfigure.cc`: initializes `mAudit` with `<logdir>/audit/` - Operations: - Files: `XrdMgmOfsFile.cc::open` (CREATE, TRUNCATE, UPDATE, READ), `fsctl/Commit.cc` (WRITE) - Directories: `Mkdir.cc` (MKDIR), `Remdir.cc` (RMDIR), `XrdMgmOfsDirectory.cc` (LIST) - Metadata: `Chmod.cc` (CHMOD), `Chown.cc` (CHOWN), `Attr.cc` (SET_XATTR, RM_XATTR) - Symlinks: `Link.cc` (SYMLINK) - Delete: `Rm.cc` (DELETE) - FUSE server (`mgm/FuseServer/Server.cc`): - Directories: `OpSetDirectory` (MKDIR, UPDATE/RENAME/MOVE; xattr changes), `OpDeleteDirectory` (RMDIR) - Files: `OpSetFile` (CREATE, UPDATE, RENAME/MOVE; CHMOD/CHOWN detection; xattr changes), `OpDeleteFile` (DELETE) - Symlinks: `OpSetLink` (SYMLINK), `OpDeleteLink` (DELETE) ### Directory path convention - Directory paths in audit entries include a trailing slash `/` for unambiguous parsing. ### Mode representation - `mode` is stored as an integer (uint32) and `mode_octal` as a string in octal for convenience. ### Parsing and tooling - Stream current audit records: ```bash zstdcat <logdir>/audit/audit.zstd | jq '.' ``` - Follow audit logs across rotations (like `tail -F`): ```bash zstdtail <logdir>/audit/audit.zstd # Or with filtering: zstdtail <logdir>/audit/audit.zstd -- jq 'select(.operation == "DELETE")' ``` - Historical segments are named `audit-YYYYMMDD-HHMMSS.zst`. Each line is a standalone JSON record; consumers can ingest line-by-line. ### Testing and performance - Unit tests: `unit_tests/common/AuditTests.cc` - Rotation and symlink behavior - Benchmark: writes 100,000 records and measures elapsed time ### Notes and caveats - Only successful operations are logged. - READ/LIST are intentionally omitted by default due to volume; can be added later. - The audit writer flushes after each record for operational visibility; adjust if batching is later desired. 
================================================ FILE: ApMon/AUTHORS ================================================ ================================================ FILE: ApMon/COPYING ================================================ GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. 
And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. 
The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. 
(Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Also add information on how to contact you by electronic and paper mail. 
If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. ================================================ FILE: ApMon/ChangeLog ================================================ ================================================ FILE: ApMon/INSTALL ================================================ Installation Instructions ************************* Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004 Free Software Foundation, Inc. This file is free documentation; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. Basic Installation ================== These are generic installation instructions. 
The `configure' shell script attempts to guess correct values for various system-dependent variables used during compilation. It uses those values to create a `Makefile' in each directory of the package. It may also create one or more `.h' files containing system-dependent definitions. Finally, it creates a shell script `config.status' that you can run in the future to recreate the current configuration, and a file `config.log' containing compiler output (useful mainly for debugging `configure'). It can also use an optional file (typically called `config.cache' and enabled with `--cache-file=config.cache' or simply `-C') that saves the results of its tests to speed up reconfiguring. (Caching is disabled by default to prevent problems with accidental use of stale cache files.) If you need to do unusual things to compile the package, please try to figure out how `configure' could check whether to do them, and mail diffs or instructions to the address given in the `README' so they can be considered for the next release. If you are using the cache, and at some point `config.cache' contains results you don't want to keep, you may remove or edit it. The file `configure.ac' (or `configure.in') is used to create `configure' by a program called `autoconf'. You only need `configure.ac' if you want to change it or regenerate `configure' using a newer version of `autoconf'. The simplest way to compile this package is: 1. `cd' to the directory containing the package's source code and type `./configure' to configure the package for your system. If you're using `csh' on an old version of System V, you might need to type `sh ./configure' instead to prevent `csh' from trying to execute `configure' itself. Running `configure' takes awhile. While running, it prints some messages telling which features it is checking for. 2. Type `make' to compile the package. 3. Optionally, type `make check' to run any self-tests that come with the package. 4. 
Type `make install' to install the programs and any data files and documentation. 5. You can remove the program binaries and object files from the source code directory by typing `make clean'. To also remove the files that `configure' created (so you can compile the package for a different kind of computer), type `make distclean'. There is also a `make maintainer-clean' target, but that is intended mainly for the package's developers. If you use it, you may have to get all sorts of other programs in order to regenerate files that came with the distribution. Compilers and Options ===================== Some systems require unusual options for compilation or linking that the `configure' script does not know about. Run `./configure --help' for details on some of the pertinent environment variables. You can give `configure' initial values for configuration parameters by setting variables in the command line or in the environment. Here is an example: ./configure CC=c89 CFLAGS=-O2 LIBS=-lposix *Note Defining Variables::, for more details. Compiling For Multiple Architectures ==================================== You can compile the package for more than one kind of computer at the same time, by placing the object files for each architecture in their own directory. To do this, you must use a version of `make' that supports the `VPATH' variable, such as GNU `make'. `cd' to the directory where you want the object files and executables to go and run the `configure' script. `configure' automatically checks for the source code in the directory that `configure' is in and in `..'. If you have to use a `make' that does not support the `VPATH' variable, you have to compile the package for one architecture at a time in the source code directory. After you have installed the package for one architecture, use `make distclean' before reconfiguring for another architecture. 
Installation Names ================== By default, `make install' will install the package's files in `/usr/local/bin', `/usr/local/man', etc. You can specify an installation prefix other than `/usr/local' by giving `configure' the option `--prefix=PREFIX'. You can specify separate installation prefixes for architecture-specific files and architecture-independent files. If you give `configure' the option `--exec-prefix=PREFIX', the package will use PREFIX as the prefix for installing programs and libraries. Documentation and other data files will still use the regular prefix. In addition, if you use an unusual directory layout you can give options like `--bindir=DIR' to specify different values for particular kinds of files. Run `configure --help' for a list of the directories you can set and what kinds of files go in them. If the package supports it, you can cause programs to be installed with an extra prefix or suffix on their names by giving `configure' the option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. Optional Features ================= Some packages pay attention to `--enable-FEATURE' options to `configure', where FEATURE indicates an optional part of the package. They may also pay attention to `--with-PACKAGE' options, where PACKAGE is something like `gnu-as' or `x' (for the X Window System). The `README' should mention any `--enable-' and `--with-' options that the package recognizes. For packages that use the X Window System, `configure' can usually find the X include and library files automatically, but if it doesn't, you can use the `configure' options `--x-includes=DIR' and `--x-libraries=DIR' to specify their locations. Specifying the System Type ========================== There may be some features `configure' cannot figure out automatically, but needs to determine by the type of machine the package will run on. 
Usually, assuming the package is built to be run on the _same_ architectures, `configure' can figure that out, but if it prints a message saying it cannot guess the machine type, give it the `--build=TYPE' option. TYPE can either be a short name for the system type, such as `sun4', or a canonical name which has the form: CPU-COMPANY-SYSTEM where SYSTEM can have one of these forms: OS KERNEL-OS See the file `config.sub' for the possible values of each field. If `config.sub' isn't included in this package, then this package doesn't need to know the machine type. If you are _building_ compiler tools for cross-compiling, you should use the `--target=TYPE' option to select the type of system they will produce code for. If you want to _use_ a cross compiler, that generates code for a platform different from the build platform, you should specify the "host" platform (i.e., that on which the generated programs will eventually be run) with `--host=TYPE'. Sharing Defaults ================ If you want to set default values for `configure' scripts to share, you can create a site shell script called `config.site' that gives default values for variables like `CC', `cache_file', and `prefix'. `configure' looks for `PREFIX/share/config.site' if it exists, then `PREFIX/etc/config.site' if it exists. Or, you can set the `CONFIG_SITE' environment variable to the location of the site script. A warning: not all `configure' scripts look for a site script. Defining Variables ================== Variables not defined in a site shell script can be set in the environment passed to `configure'. However, some packages may run configure again during the build, and the customized values of these variables may be lost. In order to avoid this problem, you should set them in the `configure' command line, using `VAR=value'. For example: ./configure CC=/usr/local2/bin/gcc will cause the specified gcc to be used as the C compiler (unless it is overridden in the site shell script). 
`configure' Invocation ====================== `configure' recognizes the following options to control how it operates. `--help' `-h' Print a summary of the options to `configure', and exit. `--version' `-V' Print the version of Autoconf used to generate the `configure' script, and exit. `--cache-file=FILE' Enable the cache: use and save the results of the tests in FILE, traditionally `config.cache'. FILE defaults to `/dev/null' to disable caching. `--config-cache' `-C' Alias for `--cache-file=config.cache'. `--quiet' `--silent' `-q' Do not print messages saying which checks are being made. To suppress all normal output, redirect it to `/dev/null' (any error messages will still be shown). `--srcdir=DIR' Look for the package's source code in directory DIR. Usually `configure' can determine that directory automatically. `configure' also accepts some other, not widely useful, options. Run `configure --help' for more details. ================================================ FILE: ApMon/Makefile ================================================ SPECFILE = $(shell find . -maxdepth 1 -type f -name '*.spec' ) DIST ?= $(shell rpm --eval %{dist}) RPMBUILD = $(shell pwd)/rpmbuild PACKAGE = $(shell awk '$$1 == "Name:" { print $$2 }' $(SPECFILE) ) VERSION = $(shell awk '$$1 == "Version:" { print $$2 }' $(SPECFILE) ) PERLDIR = $(shell perl -V:installsitearch | cut -d "'" -f 2) INSTALL ?= install DESTDIR ?= $(RPMBUILD)/BUILDROOT clean: rm -rf $(PACKAGE)-$(VERSION) rm -rf eos-apmon-*.tar.gz rm -rf $(RPMBUILD) dist: clean mkdir -p $(PACKAGE)-$(VERSION) rsync -aC --exclude '.__afs*' --exclude $(PACKAGE)-$(VERSION) . 
$(PACKAGE)-$(VERSION) tar cpfz ./$(PACKAGE)-$(VERSION).tar.gz $(PACKAGE)-$(VERSION) install: mkdir -p $(DESTDIR)/perl/ApMon/ApMon/ mkdir -p $(DESTDIR)/etc/logrotate.d/ mkdir -p $(DESTDIR)/opt/eos/apmon mkdir -p $(DESTDIR)/etc/sysconfig/ mkdir -p $(DESTDIR)/var/log/eos mkdir -p $(DESTDIR)/$(PERLDIR)/ApMon/ApMon mkdir -p $(DESTDIR)/usr/sbin/ mkdir -p $(DESTDIR)/usr/lib/systemd/system/ mkdir -p $(DESTDIR)/var/log/eos/apmon cd perl; for name in `find . -type f | grep -v svn`; do $(INSTALL) -m 755 $$name $(DESTDIR)/$(PERLDIR)/$$name; done $(INSTALL) -m 644 usr/lib/systemd/system/eosapmond.service $(DESTDIR)/usr/lib/systemd/system/ $(INSTALL) -m 755 opt/eos/apmon/eosapmond $(DESTDIR)/opt/eos/apmon/eosapmond $(INSTALL) -m 644 etc/logrotate.d/eosapmond $(DESTDIR)/etc/logrotate.d/eosapmond $(INSTALL) -m 755 run.sh $(DESTDIR)/opt/eos/apmon/run.sh prepare: dist mkdir -p $(RPMBUILD)/RPMS/$(DIST) mkdir -p $(RPMBUILD)/SRPMS/ mkdir -p $(RPMBUILD)/SPECS/ mkdir -p $(RPMBUILD)/SOURCES/ mkdir -p $(RPMBUILD)/BUILD/ cp eos-apmon-*.tar.gz $(RPMBUILD)/SOURCES cp $(SPECFILE) $(RPMBUILD)/SOURCES srpm: prepare $(SPECFILE) rpmbuild --define "_source_filedigest_algorithm md5" --define "_binary_filedigest_algorithm md5" \ --define "_topdir $(RPMBUILD)" -ts $(RPMBUILD)/SOURCES/eos-apmon-*.tar.gz rpm: srpm rpmbuild --rebuild --define "_rpmdir $(RPMBUILD)/RPMS/" \ --define "_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm" rpmbuild/SRPMS/eos-apmon-*.src.rpm ================================================ FILE: ApMon/NEWS ================================================ ================================================ FILE: ApMon/README ================================================ ================================================ FILE: ApMon/eos-apmon.spec ================================================ %{!?perl_sitearch: %define perl_sitearch %(eval "`%{__perl} -V:installsitearch`"; echo $installsitearch)} %define _unpackaged_files_terminate_build 0 %define __os_install_post 
/bin/true %define debug_package %{nil} Summary: eos-apmon package Name: eos-apmon Version: 1.1.13 Release: 1%{?dist} URL: none Source0: %{name}-%{version}.tar.gz License: OpenSource Group: Applications/Eos BuildRequires: systemd-rpm-macros Requires: perl %description This package contains service scripts for ML monitoring in EOS The service is started via systemd systemctl start | stop | status | restart eosapmond.service The initd scripts were done by Andreas-Joachim Peters [CERN] (EMAIL: andreas.joachim.peters@cern.ch). %prep %setup -q %install rm -rf %{buildroot} mkdir -p %{buildroot} %{__make} install DESTDIR=%{buildroot} %post %systemd_post eosapmond.service %preun %systemd_preun eosapmond.service %postun %systemd_postun_with_restart eosapmond.service %files %defattr(-,root,root) /%{_unitdir}/eosapmond.service /etc/logrotate.d/eosapmond %{perl_sitearch}/ApMon/ /opt/eos/apmon/eosapmond /opt/eos/apmon/run.sh %changelog * Mon Apr 28 2025 Martin Vala - 1.1.13-1 - Xrootd version is parsed from eos-xrootd package * Wed Mar 19 2025 Gianmaria Del Monte - 1.1.12-1 - Move to systemd service * Fri Jan 26 2024 Volodymyr Yurchenko - 1.1.11-1 - install systemd unit file compatible with Alma 9 * Wed Aug 4 2021 Elvin Sindrilaru - 1.1.10-1 - move the apmon logs out of the EOS FST owned directory and place them in /var/log/eos/apmon/ - bump version to 1.1.10 * Fri Dec 6 2019 Cristian Contescu - 1.1.9-1 - add fix for interface detection (fix traffic reporting) * Wed Apr 2 2014 root - 1.1.4-1 - add "_xrootd_" to the instance name - fix RPM version discovery for EOS and XRootD packages * Mon Mar 12 2011 root - 1.1.0-0 - Initial build. 
================================================ FILE: ApMon/etc/logrotate.d/eosapmond ================================================ /var/log/eos/apmon/apmon.log { missingok daily copytruncate create 755 root root dateext rotate 200 compress } ================================================ FILE: ApMon/jenkins-build.sh ================================================ #!/bin/bash #------------------------------------------------------------------------------- # @author Elvin-Alin Sindrilaru - CERN # @brief Script used by Jenkins to build EOS ApMon rpms #------------------------------------------------------------------------------- #************************************************************************ # * EOS - the CERN Disk Storage System * # * Copyright (C) 2016 CERN/Switzerland * # * * # * This program is free software: you can redistribute it and/or modify * # * it under the terms of the GNU General Public License as published by * # * the Free Software Foundation, either version 3 of the License, or * # * (at your option) any later version. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU General Public License for more details. * # * * # * You should have received a copy of the GNU General Public License * # * along with this program. If not, see .* # ************************************************************************/ #------------------------------------------------------------------------------- # Print help #------------------------------------------------------------------------------- function printHelp() { echo "Usage: " 1>&2 echo "${0} " 1>&2 echo " branch name in the form of \"origin/master\" or tag" 1>&2 echo " name e.g. 
1.0.0 for which to build the project " 1>&2 echo " XRootD tag version used for this build " 1>&2 echo " build number value passed in by Jenkins " 1>&2 echo " build platform e.g. slc-6, el-7, fc-24 " 1>&2 echo " build architecture e.g. x86_64, i386 " 1>&2 echo " destination path for the rpms built " 1>&2 } #------------------------------------------------------------------------------- # Get the local branch name and dist tag for the rpms. For example local branch # name of branch 'origin/master' is master. The dist tag for Scientific Linux 5 # can be 'slc5' or 'el5'. # Function sets two global variables BRANCH and DIST. #------------------------------------------------------------------------------- function getLocalBranchAndDistTag() { if [[ ${#} -ne 2 ]]; then echo "Usage: " 1>&2 echo "${0} " 1>&2 echo " branch name in the form of \"origin/master\" or tag" 1>&2 echo " name e.g. 1.0.0 for which to build the project " 1>&2 echo " build platform e.g. slc-6, el-7, fc-24 " 1>&2 exit 1 fi local BRANCH_OR_TAG=${1} local PLATFORM=${2} local TAG_REGEX="^[04]+\..*$" local TAG_REGEX_CITRINE="^4.*$" # If this is a tag get the branch it belongs to if [[ "${BRANCH_OR_TAG}" =~ ${TAG_REGEX} ]]; then if [[ "${BRANCH_OR_TAG}" =~ ${TAG_REGEX_CITRINE} ]]; then BRANCH="citrine" fi else BRANCH=$(basename ${BRANCH_OR_TAG}) if [[ "${BRANCH}" == "master" ]]; then BRANCH="citrine" fi fi # For any other branch use the latest XRootD release XROOTD_TAG="v4.3.0" DIST=".${PLATFORM}" # Remove any "-" from the dist tag DIST="${DIST//-}" echo "Local branch: ${BRANCH}" echo "Dist tag: ${DIST} " } #------------------------------------------------------------------------------- # Main - when we are called the current BRANCH_OR_TAG is already checked-out and # the script must be run from the **same directory** where it resides. 
#------------------------------------------------------------------------------- if [[ ${#} -ne 6 ]]; then printHelp exit 1 fi BRANCH_OR_TAG=${1} XROOTD_TAG=${2} BUILD_NUMBER=${3} PLATFORM=${4} ARCHITECTURE=${5} DST_PATH=${6} echo "Build number: ${BUILD_NUMBER}" echo "Branch or tag: ${BRANCH_OR_TAG}" echo "XRootD tag: ${XROOTD_TAG}" echo "Build platform: ${PLATFORM}" echo "Build architecture: ${ARCHITECTURE}" echo "Destination path: ${DST_PATH}" echo "Running in directory: $(pwd)" # Get local branch and dist tag for the RPMS getLocalBranchAndDistTag ${BRANCH_OR_TAG} ${PLATFORM} # Move to ApMon directory and create the tarball cd ApMon ./maketar.sh # Build the source RPM rpmbuild --define "_source_filedigest_algorithm md5" --define "_binary_filedigest_algorithm md5" --define "_topdir ./rpmbuild" -ts eos-apmon-*.tar.gz # Move the source RPM mv rpmbuild/SRPMS/eos-apmon-*.src.rpm . # Get the mock configurations from gitlab git clone ssh://git@gitlab.cern.ch:7999/dss/dss-ci-mock.git ../dss-ci-mock # Prepare the mock configuration cat ../dss-ci-mock/eos-templates/${PLATFORM}-${ARCHITECTURE}.cfg.in | sed "s/__XROOTD_TAG__/${XROOTD_TAG}/" | sed "s/__BUILD_NUMBER__/${BUILD_NUMBER}/" > eos.cfg # Build the RPMs mock --yum --init --uniqueext="eos-apmon01" -r ./eos.cfg --rebuild ./eos-apmon-*.src.rpm --resultdir ../rpms -D "dist ${DIST}" # List of branches for CI YUM repo BRANCH_LIST=('citrine') # If building one of the production branches then push rpms to YUM repo if [[ ${BRANCH_LIST[*]} =~ $BRANCH ]] ; then cd ../rpms/ # Make sure the directories are created and rebuild the YUM repo YUM_REPO_PATH="${DST_PATH}/${BRANCH}/tag/${PLATFORM}/${ARCHITECTURE}" echo "Save ApMon RPMs in YUM repo: ${YUM_REPO_PATH}" aklog mkdir -p ${YUM_REPO_PATH} cp -f *.rpm ${YUM_REPO_PATH} createrepo --update -q ${YUM_REPO_PATH} else echo "RPMs for branch ${BRANCH} are NOT saved in any YUM repository!" 
fi ================================================ FILE: ApMon/maketar.sh ================================================ #!/bin/sh # Extract package related information specfile=`find . -maxdepth 1 -name '*.spec' -type f` name=`awk '$1 == "Name:" { print $2 }' ${specfile}` version=`awk '$1 == "Version:" { print $2 }' ${specfile}` # Create the distribution tarball rm -rf ${name}-${version} rsync -aC --exclude '.__afs*' . ${name}-${version} tar -zcf ${name}-${version}.tar.gz ${name}-${version} rm -rf ${name}-${version} ================================================ FILE: ApMon/opt/eos/apmon/eosapmond ================================================ #!/usr/bin/perl # apmonpl if (@ARGV != 6) { print "Usage: $0 "; exit(1); } # Redirect stdout and stderr to log file if (!open(STDOUT, ">>", $ARGV[1])) { print STDERR "ERROR: cannot stream stdout into $ARGV[1]\n"; exit(-1); } STDOUT->autoflush(1); if (!open(STDERR, ">>", $ARGV[1])) { print STDERR "ERROR: cannot stream stderr into $ARGV[1]\n"; exit(-1); } STDERR->autoflush(1); use POSIX qw(setsid); my $sid = setsid(); if ($sid < 0) { print STDERR "ERROR: failed to create new session (setsid())\n"; exit(-1); } use strict; use warnings; use ApMon; my $apm = new ApMon(0); my $now = `date`; chomp $now; printf "# Starting at $now\n"; select STDOUT; $| = 1; select STDERR; $| = 1; $apm->setLogLevel($ARGV[2]); $apm->setDestinations(["$ARGV[0]"]); $apm->setMonitorClusterNode("$ARGV[3]_xrootd_Nodes", "$ARGV[4]"); $apm->addJobToMonitor($ARGV[5], '', 'xrootd_Services', "$ARGV[4]"); while(1){ $apm->sendBgMonitoring(); sleep(120); } ================================================ FILE: ApMon/perl/ApMon/ApMon/BgMonitor.pm ================================================ package ApMon::BgMonitor; use strict; use warnings; use ApMon::Common qw(logger); use ApMon::ProcInfo; use Data::Dumper; use Net::Domain; # Settings for Data::Dumper's dump of last values $Data::Dumper::Indent = 1; $Data::Dumper::Purity = 1; # Background Monitor 
constructor sub new { my ($type, $cmdPipe, $confFile, $lastValuesFile, $allowBgProcs, $confLoader) = @_; my $this = {}; bless $this; $this->{CMD_PIPE} = $cmdPipe; $this->{CONF_FILE} = $confFile; $this->{LAST_VALUES_FILE} = $lastValuesFile; $this->{ALLOW_BG_PROCESSES} = $allowBgProcs; $this->{CONFIG_LOADER} = $confLoader; $this->{LAST_CONF_CHECK_TIME} = 0; $this->{CONF_RECHECK} = 1; $this->{CONF_CHECK_INTERVAL} = 20; $this->{SEND_BG_MONITORING} = 0; my $hostname = Net::Domain::hostfqdn(); $this->{BG_MONITOR_CLUSTER} = "ApMon_SysMon"; $this->{BG_MONITOR_NODE} = $hostname; $this->{JOBS} = {}; $this->{PROC_INFO} = new ApMon::ProcInfo(); return $this; } # This call will never return! # It should be a used just from a child process whose role is just background monitoring. # In order to report data, user has to send a bg_enable message to enable this. sub run { my $this = shift; my $userMsg = ""; sleep(1); while(1) { $userMsg = ApMon::Common::readMessage($this->{CMD_PIPE}); $this->parseParentMessage($userMsg) if $userMsg; # use $this->{CMD_PIPE} channel to get messages from user $this->sendBgMonitoring() if $this->{SEND_BG_MONITORING}; sleep(10); # updates sould never be more often than this! } } # Registers another job for monitoring. This can be called by user or from readMessage. sub addJobToMonitor { my ($this, $pid, $workDir, $clusterName, $nodeName) = @_; $this->{JOBS}->{$pid}->{CLUSTER} = $clusterName; $this->{JOBS}->{$pid}->{NODE} = $nodeName; $this->{PROC_INFO}->addJobToMonitor($pid, $workDir); } # Removes a job from the monitored processes. This can be called either by user or by readMessage. sub removeJobToMonitor { my ($this, $pid) = @_; delete $this->{JOBS}->{$pid}; $this->{PROC_INFO}->removeJobToMonitor($pid); } # Sets the default cluster and node name for the system-related information. 
sub setMonitorClusterNode { my ($this, $cluster, $node) = @_; $this->{BG_MONITOR_CLUSTER} = $cluster; $this->{BG_MONITOR_NODE} = $node; } # Enables or disables sending of monitoring info sub enableBgMonitoring { my ($this, $enable) = @_; $this->{SEND_BG_MONITORING} = $enable; } # Sets the log level for BG_MONITOR sub setLogLevel { my ($this, $level) = @_; ApMon::Common::setLogLevel($level); } # Sets the maximum rate for the messages sent by user sub setMaxMsgRate { my ($this, $rate) = @_; ApMon::Common::setMaxMsgRate($rate); } # Sets the SI2k meter for this machine sub setCpuSI2k { my ($this, $si2k) = @_; ApMon::Common::setCpuSI2k($si2k); } # Sets the cpu speed as the one detected when probing cpu type for si2k sub setCpuMHz { my ($this, $mhz) = @_; $ApMon::Common::CpuMHz = $mhz; } # This is used only if BgMonitor is used as a dedicated monitoring process in order to interpret # messages from parent process. sub parseParentMessage { my ($this, $msg) = @_; my ($pid, $workDir, $cluster, $node); my @msgs = split(/\n/, $msg); for $msg (@msgs){ $this->setLogLevel($1) if $msg =~ /loglevel:(.*)/; $this->setMaxMsgRate($1) if $msg =~ /maxMsgRate:(.*)/; $this->enableBgMonitoring($1) if $msg =~ /bg_enable:(.*)/; $this->setCpuSI2k($1) if $msg =~ /cpu_si2k:(.*)/; $this->setCpuMHz($1) if $msg =~ /cpu_mhz:(.*)/; $pid = $1 if $msg =~ /pid:(.*)/; $this->removeJobToMonitor($1) if $msg =~ /rm_pid:(.*)/; $workDir = $1 if $msg =~ /work_dir:(.*)/; $cluster = $1 if $msg =~ /bg_cluster:(.*)/; if($msg =~ /bg_node:(.*)/){ $node = $1; if(defined $pid){ $this->addJobToMonitor($pid, $workDir, $cluster, $node); undef $pid; undef $cluster; } if(defined $cluster){ $this->setMonitorClusterNode($cluster, $node); undef $cluster; } } } } # This will send the background information to the interested listeners. It is called either from backgroundMonitor # or directly by the user from time to time to avoid having a sepparate process for this task. 
# information is about the system (load, network, memory etc.) and about a number of jobs (PIDs). # # If $mustSend is != 0, the bgMonitoring data is sent regardles of when it was last time sent. This allows # sending a 'last result', just before the end of a job, and which can happen anytime. sub sendBgMonitoring { my $this = shift; my $mustSend = shift || 0; ApMon::Common::updateConfig($this); my (@crtSysParams, @crtJobParams, $now, @sys_results, @job_results, $optsRef, $prevRawData); $now = time; my $updatedProcInfo = 0; for my $dest (keys %{$this->{DESTINATIONS}}) { $optsRef = $this->{DESTINATIONS}->{$dest}->{OPTS}; $prevRawData = $this->{DESTINATIONS}->{$dest}->{PREV_RAW_DATA}; @crtSysParams = (); @crtJobParams = (); # for each destination and its options, check if we have to do any background monitoring if($optsRef->{'sys_monitoring'} and ($mustSend or $optsRef->{'sys_data_sent'} + $optsRef->{'sys_interval'} <= $now)){ for my $param (keys %$optsRef){ if($param =~ /^sys_(.+)/ and $optsRef->{$param}){ push(@crtSysParams, $1) unless ($1 eq 'monitoring') or ($1 eq 'interval') or ($1 eq 'data_sent'); } } $optsRef->{'sys_data_sent'} = $now; } if($optsRef->{'job_monitoring'} and ($mustSend or $optsRef->{'job_data_sent'} + $optsRef->{'job_interval'} <= $now)){ for my $param (keys %$optsRef){ if($param =~ /^job_(.+)/ and $optsRef->{$param}){ push(@crtJobParams, "$1") unless ($1 eq 'monitoring') or ($1 eq 'interval') or ($1 eq 'data_sent'); } } $optsRef->{'job_data_sent'} = $now; } if($optsRef->{'general_info'} and ($mustSend or $optsRef->{'general_data_sent'} + 2 * $optsRef->{'sys_interval'} <= $now)){ for my $param (keys %$optsRef){ if(!($param =~ /^sys_/) and !($param =~ /^job_/) and ($optsRef->{$param})){ push(@crtSysParams, $param) unless ($param eq 'general_info') or ($param eq 'general_data_sent'); } } $optsRef->{'general_data_sent'} = $now; } if((! 
$updatedProcInfo) and (@crtSysParams > 0 or @crtJobParams > 0)){ $this->{PROC_INFO}->update(); $updatedProcInfo = 1; } @sys_results = ( @crtSysParams ? $this->{PROC_INFO}->getSystemData(\@crtSysParams, $prevRawData) : () ); if(@sys_results){ ApMon::Common::directSendParameters($dest, $this->{BG_MONITOR_CLUSTER}, $this->{BG_MONITOR_NODE}, -1, \@sys_results); $this->{LAST_VALUES}->{BG_MON_VALUES} = {} if ! $this->{LAST_VALUES}->{BG_MON_VALUES}; $this->update_hash($this->{LAST_VALUES}->{BG_MON_VALUES}, \@sys_results); } for my $pid (keys %{$this->{JOBS}}){ @job_results = ( @crtJobParams ? $this->{PROC_INFO}->getJobData($pid, \@crtJobParams) : () ); if(@job_results){ ApMon::Common::directSendParameters($dest, $this->{JOBS}->{$pid}->{CLUSTER},$this->{JOBS}->{$pid}->{NODE},-1,\@job_results); $this->{LAST_VALUES}->{JOBS}->{$pid}->{BG_MON_VALUES} = {} if ! $this->{LAST_VALUES}->{JOBS}->{$pid}->{BG_MON_VALUES}; $this->update_hash($this->{LAST_VALUES}->{JOBS}->{$pid}->{BG_MON_VALUES}, \@job_results); } } } if(open(F, ">$this->{LAST_VALUES_FILE}")){ print F Dumper($this->{LAST_VALUES}); close F; chmod(0600, $this->{LAST_VALUES_FILE}); }else{ logger("WARNING", "Cannot save last BgMonitored values to $this->{LAST_VALUES_FILE}"); } } # update in the given hash the rest of pa sub update_hash { my $this = shift; my $hash = shift || {} ; my $params = shift; @$params & 1 and logger("WARNING", "Odd number of parameters in update_hash") and return; while(@$params){ my $key = shift(@$params); my $val = shift(@$params); $hash->{$key} = $val; } } 1; ================================================ FILE: ApMon/perl/ApMon/ApMon/Common.pm ================================================ package ApMon::Common; use strict; use warnings; require Exporter; use Carp qw(cluck); use Socket; use ApMon::XDRUtils; use Data::Dumper; use Sys::Hostname; use vars qw(@ISA @EXPORT @EXPORT_OK $APMON_DEFAULT_PORT $VERSION %defaultOptions $KSI2K $CpuMHz); push @ISA, qw(Exporter); push @EXPORT, qw(logger); 
push @EXPORT_OK, qw($APMON_DEFAULT_PORT %defaultOptions); $VERSION = "2.2.18"; $APMON_DEFAULT_PORT = 8884; my @LOG_LEVELS = ("DEBUG", "NOTICE", "INFO", "WARNING", "ERROR", "FATAL"); my $CRT_LOGLEVEL = 2; # index in the array above my $MAX_MSG_RATE = 20; # Default value for max nr. of messages that user is allowed to send, per second $KSI2K = undef; # kilo spec ints 2k for this machine $CpuMHz = undef; # Cpu Speed when taking the speed for KSI2k # Default options for background monitoring %defaultOptions = ( 'job_monitoring' => 1, # perform (or not) job monitoring 'job_interval' => 60, # at this interval (in seconds) 'job_data_sent' => 0, # time from Epoch when job information was sent; don't touch! 'job_cpu_time' => 1, # processor time spent running this job in seconds 'job_cpu_ksi2k' => 1, # used CPU power in ksi2k units (see SpecInt2000 for details); 'job_run_time' => 1, # elapsed time from the start of this job in seconds 'job_run_ksi2k' => 1, # elapsed time in ksi2k units 'job_cpu_usage' => 1, # current percent of the processor used for this job, as reported by ps 'job_virtualmem' => 1, # size in JB of the virtual memory occupied by the job, as reported by ps 'job_rss' => 1, # size in KB of the resident image size of the job, as reported by ps 'job_mem_usage' => 1, # percent of the memory occupied by the job, as reported by ps 'job_workdir_size' => 1, # size in MB of the working directory of the job 'job_disk_total' => 1, # size in MB of the total size of the disk partition containing the working directory 'job_disk_used' => 1, # size in MB of the used disk partition containing the working directory 'job_disk_free' => 1, # size in MB of the free disk partition containing the working directory 'job_disk_usage' => 1, # percent of the used disk partition containing the working directory 'job_open_files' => 1, # number of open file descriptors 'job_page_faults_min' => 1, # number of minor page faults in the job 'job_page_faults_maj' => 1, # number of major page 
faults in the job 'sys_monitoring' => 1, # perform (or not) system monitoring 'sys_interval' => 60, # at this interval (in seconds) 'sys_data_sent' => 0, # time from Epoch when system information was sent; don't touch! 'sys_cpu_usr' => 1, # cpu-usage information 'sys_cpu_sys' => 1, # all these will produce coresponding paramas without "sys_" 'sys_cpu_nice' => 1, 'sys_cpu_idle' => 1, 'sys_cpu_iowait' => 1, 'sys_cpu_irq' => 1, 'sys_cpu_softirq' => 1, 'sys_cpu_steal' => 1, 'sys_cpu_guest' => 1, 'sys_cpu_usage' => 1, 'sys_interrupts' => 1, 'sys_context_switches' => 1, 'sys_load1' => 1, # system load information 'sys_load5' => 1, 'sys_load15' => 1, 'sys_mem_used' => 1, # memory usage information 'sys_mem_free' => 1, 'sys_mem_actualfree' => 1, # actually free memory: free + cached + buffers 'sys_mem_usage' => 1, 'sys_mem_buffers' => 1, 'sys_mem_cached' => 1, 'sys_blocks_in' => 1, 'sys_blocks_out' => 1, 'sys_swap_used' => 1, # swap usage information 'sys_swap_free' => 1, 'sys_swap_usage' => 1, 'sys_swap_in' => 1, 'sys_swap_out' => 1, 'sys_net_in' => 1, # network transfer in kBps 'sys_net_out' => 1, # these will produce params called ethX_in, ethX_out, ethX_errs 'sys_net_errs' => 1, # for each eth interface 'sys_net_sockets' => 1, # number of opened sockets for each proto => sockets_tcp/udp/unix ... 'sys_net_tcp_details' => 1, # number of tcp sockets in each state => sockets_tcp_LISTEN, ... 'sys_processes' => 1, # total processes and processs in each state (R, S, D ...) 'sys_uptime' => 1, # uptime of the machine, in days (float number) 'general_info' => 1, # send (or not) general host information once every 2 $sys_interval seconds 'general_data_sent' => 0, # time from Epoch when general information was sent; don't touch! 
'hostname' => 1, 'ip' => 1, # will produce _ip params for each physical interface 'ipv6' => 1, # will produce _ipv6 params for each physical interface 'kernel_version' => 1, 'eos_rpm_version' => 1, 'xrootd_rpm_version' => 1, 'platform' => 1, 'os_type' => 1, 'cpu_MHz' => 1, 'no_CPUs' => 1, # number of CPUs 'ksi2k_factor' => 1, # system's ksi2k factor, if known 'total_mem' => 1, 'total_swap' => 1, 'cpu_vendor_id' => 1, 'cpu_family' => 1, 'cpu_model' => 1, 'cpu_model_name' => 1, 'cpu_cache' => 1, 'bogomips' => 1); # Create a UDP socket through which all information is sent if(! socket(SOCKET, PF_INET, SOCK_DGRAM, getprotobyname("udp"))){ logger("FATAL", "Cannot create UDP socket $@"); die; } # Simple logger sub logger { my ($level, $msg) = @_; my $i = 0; $i++ while (! ($LOG_LEVELS[$i] eq $level) and ($i < @LOG_LEVELS)); if($CRT_LOGLEVEL <= $i and $i < @LOG_LEVELS){ my $now =localtime(); $now =~ s/^\S+\s((\S+\s+){3}).*$/$1/; print $now."ApMon[$LOG_LEVELS[$i]]: $msg\n"; } } # Sets the CRT_LOGLEVEL sub setLogLevel { my $level = shift; logger("NOTICE", "Setting loglevel to $level"); if(! defined $level){ cluck("got undefined level from"); return; } my $i = 0; $i++ while (! ($LOG_LEVELS[$i] eq $level) and ($i < @LOG_LEVELS)); if($i < @LOG_LEVELS){ $CRT_LOGLEVEL = $i; }else{ logger("WARNING", "Unknown log level \"$level\" - ignoring.\n"); } } # Sets the maximum rate for sending messages (see shouldSend subroutine) sub setMaxMsgRate { my $rate = shift; $MAX_MSG_RATE = $rate; logger("INFO", "Setting maxMsgRate to $rate"); } # For each destination, we'll keep a pair (instance_id, seq_nr) that will identify us my $senderRef = {}; my $instance_id = getInstanceID(); # This is used internally to send a set of parameters to a given destination. sub directSendParameters { my ($destination, $clusterName, $nodeName, $time, $paramsRef) = @_; my @params; if(! defined($paramsRef)){ logger("WARNING", "Not sending undefined parameters!"); return; } if(! 
defined($time)){ logger("WARNING", "Not sending the parameters for an undefined time!"); return; } if(! shouldSend()){ #logger("WARNING", "Not sending since the messages are too often!"); return; } if(ref($paramsRef->[0]) eq "ARRAY"){ @params = @{$paramsRef->[0]}; }elsif(ref($paramsRef->[0]) eq "HASH"){ @params = %{$paramsRef->[0]}; }else{ @params = @$paramsRef; } if(@params == 0){ return; } $senderRef->{$destination} = {INSTANCE_ID => $instance_id, SEQ_NR => 0} if ! $senderRef->{$destination}; my $sender = $senderRef->{$destination}; $sender->{INSTANCE_ID} = ($$ << 16) | ($sender->{INSTANCE_ID} && 0xffff); $sender->{SEQ_NR} = ($sender->{SEQ_NR} + 1) % 2_000_000_000; # wrap around 2 mld my ($host, $port, $pass) = split(/:/, $destination); logger("NOTICE", "====> $host|$port|$pass/$clusterName/$nodeName".($time != -1 ? " @ $time" : "")." [$sender->{SEQ_NR} # $sender->{INSTANCE_ID}]"); for(my $i = 0; $i < @params; $i += 2){ if(defined($params[$i]) && defined($params[$i+1])){ logger("NOTICE", " ==> $params[$i] = $params[$i+1]"); }else{ logger("NOTICE", " ==> ".(defined($params[$i]) ? $params[$i] : "undef name")." = ".(defined($params[$i+1]) ? $params[$i+1] : "undef value")." <== ignoring pair"); splice(@params, $i, 2); $i-=2; } } my $header = "v:${VERSION}_plp:$pass"; my $msg = ApMon::XDRUtils::encodeString($header) . ApMon::XDRUtils::encodeINT32($sender->{INSTANCE_ID}) . ApMon::XDRUtils::encodeINT32($sender->{SEQ_NR}) . ApMon::XDRUtils::encodeParameters($clusterName, $nodeName, $time, @params); my $in_addr = inet_aton($host); my $in_paddr = sockaddr_in($port, $in_addr); my $msg_len = length($msg); if(send(SOCKET, $msg, 0, $in_paddr) != $msg_len){ logger("ERROR", "Could not send UDP datagram to $host:$port"); }else{ logger("NOTICE", "~~~~> Packet sent successfully; total size=$msg_len bytes."); } } # This is called by child processes to read messages (if they exist) from the parent. 
sub readMessage { my $PIPE = shift; my ($rin, $win, $ein, $rout, $wout, $eout) = ('', '', ''); my $retMsg = ""; vec($rin,fileno($PIPE),1) = 1; $ein = $rin | $win; my ($nfound,$timeleft) = select($rout=$rin, $wout=$win, $eout=$ein, 0); if($nfound){ sysread($PIPE, $retMsg, 1024); logger("DEBUG", "readMessage: $retMsg"); } return $retMsg; } # This is called by main process to send a message to a child that reads form the given pipe sub writeMessage { my ($PIPE, $msg) = @_; if(defined $PIPE){ logger("DEBUG", "writeMessage: $msg"); syswrite($PIPE, $msg); }else{ logger("ERROR", "Trying to send '$msg' to child, but the pipe is not defined!"); } } # copy the time when last data was sent sub updateLastSentTime { my ($srcOpts, $dstOpts) = @_; $dstOpts->{'general_data_sent'} = $srcOpts->{'general_data_sent'} if $srcOpts->{'general_data_sent'}; $dstOpts->{'sys_data_sent'} = $srcOpts->{'sys_data_sent'} if $srcOpts->{'sys_data_sent'}; $dstOpts->{'job_data_sent'} = $srcOpts->{'job_data_sent'} if $srcOpts->{'job_data_sent'}; } # This is used to update the configuration for an object that has in it's base hash the following elements # DESTINATIONS, CONF_RECHECK, LAST_CONF_CHECK_TIME, CONF_CHECK_INTERVAL and CONF_FILE. # In practice, both ApMon and BgMonitor use it to update their configuration. sub updateConfig { my $this = shift; if(! 
$this->{ALLOW_BG_PROCESSES}){ $this->{DESTINATIONS} = $this->{CONFIG_LOADER}->{DESTINATIONS}; return; } my $now = time; if((scalar(keys %{$this->{DESTINATIONS}}) > 0 and $this->{CONF_RECHECK} == 0) or ($this->{LAST_CONF_CHECK_TIME} + $this->{CONF_CHECK_INTERVAL} > $now)){ return; } logger("DEBUG", "Updating configuration from $this->{CONF_FILE}"); if(open(CONF, "<$this->{CONF_FILE}")){ my $prevDest = $this->{DESTINATIONS} || {}; $this->{DESTINATIONS} = {}; # clear old destinations first my ($crtDest, $line); while($line = <CONF>){ chomp $line; if($line =~ /^(\S+):(\S+):(\S*)$/){ # reading a new destination $crtDest = $line; my %defOpts = %defaultOptions; #get a copy of the default options $this->{DESTINATIONS}->{$crtDest}->{OPTS} = \%defOpts; updateLastSentTime($prevDest->{$crtDest}->{OPTS}, $this->{DESTINATIONS}->{$crtDest}->{OPTS}); $this->{DESTINATIONS}->{$crtDest}->{PREV_RAW_DATA} = ($prevDest->{$crtDest}->{PREV_RAW_DATA} ? $prevDest->{$crtDest}->{PREV_RAW_DATA} : {}); logger("DEBUG", "Adding destination $line"); }elsif($line =~ /^\s(\S+)=(\S+)/) { # reading an attribute for the current destination and modify the current options my ($name, $value) = ($1, $2); logger("DEBUG", "Adding $name=$value"); if($name eq 'loglevel'){ $this->setLogLevel($value); }elsif($name eq 'conf_recheck'){ $this->{CONF_RECHECK} = $value; }elsif($name eq 'recheck_interval'){ $this->{CONF_CHECK_INTERVAL} = $value; }elsif($name eq 'maxMsgRate'){ $this->setMaxMsgRate($value); }else{ $this->{DESTINATIONS}->{$crtDest}->{OPTS}->{$name} = $value; } }else{ logger("WARNING", "Unknown line in conf file: $line"); } } close CONF; }else{ logger("ERROR", "Error opening temporary config file $this->{CONF_FILE}. 
Current config is unchanged."); return; } $this->{LAST_CONF_CHECK_TIME} = time; } # don't allow a user to send more than MAX_MSG messages per second, in average my $prvTime = 0; my $prvSent = 0; my $prvDrop = 0; my $crtTime = 0; my $crtSent = 0; my $crtDrop = 0; my $hWeight = 0.92; # Decide if the current datagram should be sent. # This decision is based on the number of messages previously sent. sub shouldSend { my $now = time; if($now != $crtTime){ # new time # update previous counters; $prvSent = $hWeight * $prvSent + (1 - $hWeight) * $crtSent / ($now - $crtTime); $prvTime = $crtTime; logger("DEBUG", "previously sent: $crtSent; dropped: $crtDrop"); # reset current counter $crtTime = $now; $crtSent = 0; $crtDrop = 0; } my $valSent = $prvSent * $hWeight + $crtSent * (1 - $hWeight); # compute the history my $doSend = 1; my $level = $MAX_MSG_RATE - $MAX_MSG_RATE / 10; # when we should start dropping messages if($valSent > $MAX_MSG_RATE - $level){ $doSend = rand($MAX_MSG_RATE / 10) < ($MAX_MSG_RATE - $valSent); } # counting sent and dropped messages if($doSend){ $crtSent++; }else{ $crtDrop++; } return $doSend; } # Try to generate a more random instance id. It takes the process ID and # combines it with the last digit from the IP addess and a random number sub getInstanceID { my $pid = $$; my $ip = int(rand(256)); # last digit of the ip address my $host = hostname(); # from Sys::Hostname if($host){ my $addr = inet_ntoa(scalar gethostbyname($host)); $ip = $1 if $addr =~ /(\d+)$/; } my $rnd = int(rand(256)); my $iid = ($pid << 16) | ($ip << 8) | $rnd; # from all this, generate the instance id return $iid; } # Try to determine the CPU type. Returns a hash with: cpu_model_name, cpu_MHz, cpu_cache (in KB) # TODO: make this work also for Mac. 
sub getCpuType {
  # Determine the CPU type; returns a hash ref with cpu_model_name, cpu_MHz,
  # cpu_cache (in KB), or undef when nothing could be read.
  # NOTE(review): the "<FH>" readline tokens and the open() mode strings were lost
  # in an earlier extraction of this file; they are restored here from the
  # surrounding open()/close() pairs — confirm against upstream ApMon.
  my $cpu_type = {};
  if(-r "/proc/cpuinfo"){
    if(open(CPU_INFO, "</proc/cpuinfo")){
      my $line;
      while($line = <CPU_INFO>){
        if($line =~ /cpu MHz\s+:\s+(\d+\.?\d*)/){
          $cpu_type->{"cpu_MHz"} = $1;
          $CpuMHz = $1; # remember the CPU speed in the package global as well
        }
        $cpu_type->{"cpu_model_name"} = $1 if($line =~ /model name\s+:\s+(.+)/ || $line =~ /family\s+:\s+(.+)/);
        $cpu_type->{"cpu_cache"} = $1 if($line =~ /cache size\s+:\s+(\d+)/);
      }
      close(CPU_INFO);
    }else{
      logger("NOTICE", "Cannot open /proc/cpuinfo");
    }
  }
  # Itanium kernels export cache details under /proc/pal; the level-3 size wins.
  if(-r "/proc/pal/cpu0/cache_info"){
    if(open(CACHE_INFO, "</proc/pal/cpu0/cache_info")){
      my ($line, $level3params);
      while($line = <CACHE_INFO>){
        $level3params = 1 if($line =~/Cache level 3/);
        $cpu_type->{"cpu_cache"} = $1 / 1024 if ($level3params && $line =~ /Size\s+:\s+(\d+)/);
      }
      close(CACHE_INFO);
    }else{
      logger("NOTICE", "Cannot open /proc/pal/cpu0/cache_info");
    }
  }
  if(! scalar(keys(%$cpu_type))){
    logger("INFO", "Cannot get cpu type");
    return undef;
  }
  return $cpu_type;
}

# Set the SI2K performance meter for this machine. If this function is called then parameter
# cpu_ksi2k will also be reported for the job monitoring with a value computed this way:
# cpu_ksi2k(job) = cpu_time(job) * ( si2k / 1000)
sub setCpuSI2k {
  my $si2k = shift;
  $KSI2K = $si2k / 1000.0 if($si2k);
}

1;


================================================
FILE: ApMon/perl/ApMon/ApMon/ConfigLoader.pm
================================================
package ApMon::ConfigLoader;

use strict;
use warnings;

use ApMon::Common qw(logger $APMON_DEFAULT_PORT %defaultOptions);
use Socket;
use Data::Dumper;
use Carp qw(cluck);

# Config Loader constructor
sub new {
  my ($type, $cmdPipe, $confFile) = @_;
  my $this = {};
  bless $this;
  $this->{CMD_PIPE} = $cmdPipe;        # pipe on which the parent sends us commands
  $this->{CONF_FILE} = $confFile;      # temporary config file shared with the other processes
  $this->{LAST_CONF_CHECK_TIME} = 0;
  $this->{CONF_RECHECK} = 1;           # whether to periodically re-read the configuration
  $this->{CONF_CHECK_INTERVAL} = 30;   # seconds between two configuration checks
  $this->{DEST_LOCATIONS} = ();        # http/file locations from where to read the config
  $this->{DESTINATIONS} = {};
  return $this;
}

# This call will never return!
# It should be a used just from a child process whose role is only configuration refreshing.
# Main loop of the config-loader child: read commands from the parent pipe,
# re-read the configuration when conf_recheck is enabled, then sleep. Never returns.
sub run {
  my $this = shift;
  my ($wasSuccess, $userMsg) = (0, undef);
  while(1) {
    $userMsg = ApMon::Common::readMessage($this->{CMD_PIPE});
    $this->parseParentMessage($userMsg) if $userMsg;
    $wasSuccess = $this->refreshConfig($wasSuccess) if $this->{CONF_RECHECK};
    sleep($this->{CONF_CHECK_INTERVAL});
  }
}

# This allows setting the configuration. It can be used with several arguments:
# - list of strings (URLs and/or files) - the configuration will be read from all
# - reference to an ARRAY - each element is a destination ML service; for each destination
#   the default options will be used
# - reference to a HASH - each key is a destination ML service; for each destination you can
#   define a set of additional options that will overwrite the default ones.
sub setDestinations {
  my ($this, @destLocations) = @_;
  my $prevDest = $this->{DESTINATIONS};
  $this->{DESTINATIONS} = {};
  # determine the way we were instantiated and initialize accordingly
  if(ref($destLocations[0]) eq "ARRAY"){
    # user gave a reference to an array, each element being a destination (host[:port][ pass])
    # we will send datagrams to all valid destinations (i.e. host can be resolved), with default options
    $this->{CONF_RECHECK} = 0;
    my ($destStr, $dest);
    for $destStr (@{$destLocations[0]}) {
      $dest = $this->parseDestination($destStr);
      if($dest){
        my %defOptsCopy = %defaultOptions;
        logger("INFO", "Added destination $dest with default options.");
        $this->{DESTINATIONS}->{$dest}->{OPTS} = \%defOptsCopy;
        ApMon::Common::updateLastSentTime($prevDest->{$dest}->{OPTS}, $this->{DESTINATIONS}->{$dest}->{OPTS});
        $this->{DESTINATIONS}->{$dest}->{PREV_RAW_DATA} = ($prevDest->{$dest}->{PREV_RAW_DATA} ? $prevDest->{$dest}->{PREV_RAW_DATA} : {});
        $this->{DESTINATIONS}->{$dest}->{OPTS}->{'conf_recheck'} = 0;
      }
    }
    $this->writeDestinations();
  }elsif(ref($destLocations[0]) eq "HASH"){
    # user gave a reference to a hash, each key being a destination (host[:port][ pass])
    # we will send datagrams to all valid destinations (i.e. host can be resolved), overwritting the
    # default options with the ones passed by user. Options will be named as in the %defaultOptions.
    $this->{CONF_RECHECK} = 0;
    my ($destStr, $dest);
    for $destStr (keys %{$destLocations[0]}){
      $dest = $this->parseDestination($destStr);
      if($dest){
        my %defOptsCopy = %defaultOptions;
        $this->{DESTINATIONS}->{$dest}->{OPTS} = \%defOptsCopy;
        ApMon::Common::updateLastSentTime($prevDest->{$dest}->{OPTS}, $this->{DESTINATIONS}->{$dest}->{OPTS});
        $this->{DESTINATIONS}->{$dest}->{PREV_RAW_DATA} = ($prevDest->{$dest}->{PREV_RAW_DATA} ? $prevDest->{$dest}->{PREV_RAW_DATA} : {});
        $this->{DESTINATIONS}->{$dest}->{OPTS}->{'conf_recheck'} = 0;
        logger("INFO", "Added destination $dest with the following additional options:");
        # now we have to modify default options with the ones given by user
        my ($key, $value);
        for $key (keys %{$destLocations[0]->{$destStr}}){
          $value = $destLocations[0]->{$destStr}->{$key};
          logger("INFO", " -> $key = $value");
          $this->{DESTINATIONS}->{$dest}->{OPTS}->{$key} = $value;
        }
      }
    }
    $this->writeDestinations();
  }else{
    # we got a list of URLs and/or files. Fetch them and get the configuration
    $this->{DEST_LOCATIONS} = ();
    push(@{$this->{DEST_LOCATIONS}}, @destLocations);
    $this->refreshConfig();
  }
}

# This will fetch all the configuration files and then, if this part was succesful, it
# will call parseConfig to build the temporary configuration file from which both Main
# and BgMonitor will read the destinations
sub refreshConfig {
  my $this = shift;
  my $wasSuccess = shift || 0;
  if(! $this->{DEST_LOCATIONS} || (! @{$this->{DEST_LOCATIONS}})){
    logger("NOTICE", "No configuration file was given.");
    return $wasSuccess;
  }
  my ($error, $linesRef);
  logger("DEBUG", "Refreshing config from pid $$");
  ($error, $linesRef) = $this->fetchConfig(@{$this->{DEST_LOCATIONS}});
  if(! $error){ # or ($error and ($wasSuccess < @$linesRef))){
    # if reading destinations worked ok, or if we had a partial error reading files,
    # but the configuration size is bigger than earlier, apply those new changes
    $wasSuccess = @$linesRef;
    $this->parseConfig($linesRef);
  }else{
    logger("WARNING", "Failed reading destination files/urls. Configuration will remain unchanged.");
  }
  return $wasSuccess;
}

# fetch the configuration form all given files/URLs. It returns a pair ($error, $linesRef) where
# $error contains the number of locations from where the retrieval of the configuration failed.
# $linesRef is a reference to an array containing all the lines.
sub fetchConfig {
  my ($this, @dests) = @_;
  my @lines = ();
  my $error = 1;
  for my $dest (@dests){
    if ( $dest =~ /^http:\/\// ) {
      logger("INFO", "Reading config from url: $dest");
      require LWP::UserAgent;
      my $ua = LWP::UserAgent->new();
      $ua->timeout(5);
      $ua->env_proxy();
      my $response = $ua->get($dest);
      if($response->is_success){
        push(@lines, split("\n", $response->content . "\nEND_PART\n"));
        $error = 0;
      }else{
        logger("WARNING", "Error reading url: $dest");
        logger("WARNING", "Got: ".$response->status_line);
      }
    }else{
      logger("INFO", "Reading config from file: $dest");
      if(open(INFILE, "<$dest")){
        # NOTE(review): the <INFILE> readline token was lost in extraction; restored.
        my @newlines = <INFILE>;
        push(@lines, @newlines);
        close(INFILE);
        push(@lines, split("\n", "\nEND_PART\n"));
        $error = 0;
      }else{
        logger("WARNING", "Error reading file: $dest");
      }
    }
  }
  return ($error, \@lines);
}

# This will parse the config lines brought by fetchConfig, creating the local temporary config file
sub parseConfig {
  my ($this, $linesRef) = @_;
  my @lines = @$linesRef;
  my @dests = ();
  my %opts = ();
  my $prevDest = $this->{DESTINATIONS};
  $this->{DESTINATIONS} = {};
  for my $line (@lines) {
    chomp $line;
    next if $line =~ /^\s*$/;    # skip empty lines
    next if $line =~ /^\s*#/;    # skip comments
    $line =~ s/\s+/ /g;          # eliminate multiple spaces
    $line =~ s/^ //;             # remove space at the beginning
    $line =~ s/ $//;             # remove space at the end
    if($line =~ /^xApMon_(.*)/){
      # set an option for the current destinations
      my $opt = $1;
      if($opt =~ /(\S+)\s?=\s?(\S+)/){
        my ($name, $value) = ($1, $2);
        $value = $value =~ /off/i ? 0 : $value;
        $value = $value =~ /on/i ? 1 : $value;
        $opts{$name} = $value;
        $this->setLogLevel($value) if $name eq "loglevel";
        $this->setMaxMsgRate($value) if $name eq "maxMsgRate";
        $this->{CONF_RECHECK} = $value if $name eq "conf_recheck";
        $this->{CONF_CHECK_INTERVAL} = $value if $name eq "recheck_interval";
        #logger("DEBUG", "set option $name <- $value");
      }
    }elsif($line =~ /END_PART/){
      #logger("DEBUG", "Storing options into the temp conf file");
      for my $dest (@dests){
        my %optsCopy = %opts;
        $this->{DESTINATIONS}->{$dest}->{OPTS} = \%optsCopy;
        ApMon::Common::updateLastSentTime($prevDest->{$dest}->{OPTS}, $this->{DESTINATIONS}->{$dest}->{OPTS});
        $this->{DESTINATIONS}->{$dest}->{PREV_RAW_DATA} = ($prevDest->{$dest}->{PREV_RAW_DATA} ? $prevDest->{$dest}->{PREV_RAW_DATA} : {});
      }
      @dests = ();
      %opts = ();
    }else{
      # parse a new destination
      my $dest = $this->parseDestination($line);
      push(@dests, $dest) if $dest;
    }
  }
  $this->writeDestinations();
}

# Write the destinations to the temporary config file, to be able to get them also from the
# other processes
sub writeDestinations {
  my $this = shift;
  if(open(CONF, ">".$this->{CONF_FILE}.".tmp")) {
    logger("DEBUG", "Writting config to $this->{CONF_FILE}");
    my ($dest, $opt, $val);
    for $dest (keys %{$this->{DESTINATIONS}}) {
      print CONF "$dest\n";
      for $opt (keys %{$this->{DESTINATIONS}->{$dest}->{OPTS}}) {
        $val = $this->{DESTINATIONS}->{$dest}->{OPTS}->{$opt};
        print CONF " $opt=$val\n";
      }
    }
    close(CONF);
    chmod(0600, $this->{CONF_FILE}.'.tmp');
    # this is done in order to keep the interference between the processes as small as possible
    rename($this->{CONF_FILE}.'.tmp', $this->{CONF_FILE});
  }else{
    logger("ERROR", "Cannot write destinations to file $this->{CONF_FILE}");
  }
}

# Given a destination line (i.e. host[:port][ passwd]), it returns an array containing at most one
# string of the following form: "ip:port:passwd"
# This is what will be used as a destination in sending the directSendParameters.
sub parseDestination {
  my ($this, $line) = @_;
  my $dest = "";
  if($line =~ /([\.\-a-zA-Z0-9]+)\s*:?\s*(\d+)?\s*(.*)?/){
    my ($host, $port, $pass) = ($1, $2, $3);
    $port = (! defined($port) || $port eq "") ? $APMON_DEFAULT_PORT : $port;
    my ($name,$aliases,$type,$len,$addr) = gethostbyname($host);
    if (defined($len) and $len == 4) {
      my $ip = inet_ntoa($addr);
      logger("DEBUG", "found destination i=$ip, P=$port, p=$pass");
      $dest = "$ip:$port:$pass";
    }else{
      logger("WARNING", "Error resolving host $host");
    }
  }
  return $dest;
}

# This will parse the options sent by functions in ApMon
sub parseParentMessage {
  my ($this, $msg) = @_;
  my @msgs = split(/\n/, $msg);
  my @dests = ();
  logger("DEBUG", "Reading messages from user");
  for $msg (@msgs){
    $this->setLogLevel($1) if $msg =~ /loglevel:(.*)/;
    $this->setMaxMsgRate($1) if $msg =~ /maxMsgRate:(.*)/;
    $this->{CONF_RECHECK} = $1 if $msg =~ /conf_recheck:(.*)/;
    $this->{CONF_CHECK_INTERVAL} = $1 if $msg =~ /recheck_interval:(.*)/;
    push(@dests, $1) if $msg =~/dest:(.*)/;
  }
  $this->setDestinations(@dests) if @dests;
}

# Sets the log level for CONFIG_LOADER
sub setLogLevel {
  my ($this, $level) = @_;
  ApMon::Common::setLogLevel($level);
}

# Sets the maximum rate for the messages sent by user
sub setMaxMsgRate {
  my ($this, $rate) = @_;
  ApMon::Common::setMaxMsgRate($rate);
}

1;


================================================
FILE: ApMon/perl/ApMon/ApMon/ProcInfo.pm
================================================
package ApMon::ProcInfo;

use strict;
use warnings;

use ApMon::Common qw(logger);
use Data::Dumper;
use Net::Domain;
use Time::Local;
use Config;

# See the end of this file for a set of interesting methods for other modules.
# ProcInfo constructor
sub new {
  my $this = {};
  $this->{DATA} = {};              # monitored data that is going to be reported
  $this->{JOBS} = {};              # jobs that will be monitored
  $this->{NETWORKINTERFACES} = {}; # network interface names
  # names of the months for ps start time of a process
  $this->{MONTHS} = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
  $this->{readGI} = 0;             # used to read generic information less often
  bless $this;
  return $this;
}

# this has to be run twice (with the $lastUpdateTime updated) to get some useful results
# NOTE(review): the "<FH>" readline tokens and open() mode strings below were lost in an
# earlier extraction of this file; restored from the matching open()/close() pairs.
sub readStat {
  my $this = shift;
  if ($Config{osname} eq "solaris"){
    # on Solaris the raw counters come from vmstat/iostat instead of /proc
    if (open(VMSTAT, "vmstat -s |")){
      my $line;
      while ($line = <VMSTAT>){
        if ($line =~ /(\d+)\s+user\s+cpu/ ){ $this->{DATA}->{"raw_cpu_usr"} = $1; }
        if ($line =~ /(\d+)\s+system\s+cpu/){ $this->{DATA}->{"raw_cpu_sys"} = $1; }
        if ($line =~ /(\d+)\s+idle\s+cpu/){ $this->{DATA}->{"raw_cpu_idle"} = $1; }
        if ($line =~ /(\d+)\s+wait\s+cpu/){ $this->{DATA}->{"raw_cpu_iowait"} = $1; }
        if ($line =~ /(\d+)\s+pages\s+swapped\s+in/){ $this->{DATA}->{"raw_swap_in"} = $1; }
        if ($line =~ /(\d+)\s+pages\s+swapped\s+out/){ $this->{DATA}->{"raw_swap_out"} = $1; }
        if ($line =~ /(\d+)\s+device\s+interrupts/){ $this->{DATA}->{"raw_interrupts"} = $1; }
        if ($line =~ /(\d+)\s+cpu\s+context\s+switches/){ $this->{DATA}->{"raw_context_switches"} = $1; }
      }
      close VMSTAT;
    }
    $this->{DATA}->{"raw_blocks_in"} = $this->{DATA}->{"raw_blocks_out"} = 0;
    if (open(IOSTAT, "iostat -xnI |")){
      my $line = <IOSTAT>;  # skip the two header lines
      $line = <IOSTAT>;
      my $kbR = 0;
      my $kbW = 0;
      while ($line = <IOSTAT>){
        (undef, $kbR, $kbW) = split(/\s+/, $line);
        $this->{DATA}->{"raw_blocks_in"} += $kbR;
        $this->{DATA}->{"raw_blocks_out"} += $kbW;
      }
      close IOSTAT;
    }
    return;
  }
  if(open(STAT, "</proc/stat")){
    my $line;
    while($line = <STAT>){
      if($line =~ /^cpu\s/) {
        (undef, $this->{DATA}->{"raw_cpu_usr"}, $this->{DATA}->{"raw_cpu_nice"}, $this->{DATA}->{"raw_cpu_sys"},
         $this->{DATA}->{"raw_cpu_idle"}, $this->{DATA}->{"raw_cpu_iowait"}, $this->{DATA}->{"raw_cpu_irq"},
         $this->{DATA}->{"raw_cpu_softirq"}, $this->{DATA}->{"raw_cpu_steal"}, $this->{DATA}->{"raw_cpu_guest"}
        ) = split(/ +/, $line);
      }
      if($line =~ /^page/) { # this doesn't work for kernel >2.5
        (undef, $this->{DATA}->{"raw_blocks_in"}, $this->{DATA}->{"raw_blocks_out"}) = split(/ +/, $line);
      }
      if($line =~/^swap/) { # this also doesn't work in >2.5
        (undef, $this->{DATA}->{"raw_swap_in"}, $this->{DATA}->{"raw_swap_out"}) = split(/ +/, $line);
      }
      $this->{DATA}->{"raw_interrupts"} = $1 if($line =~ /^intr\s(\d+)/);
      $this->{DATA}->{"raw_context_switches"} = $1 if($line =~ /^ctxt\s(\d+)/);
    }
    close STAT;
  }else{
    logger("NOTICE", "ProcInfo: cannot open /proc/stat"); #disable ..?
  }
  # blocks_in/out and swap_in/out are moved to /proc/vmstat in >2.5 kernels
  if(-r "/proc/vmstat"){
    if(open(VMSTAT, "</proc/vmstat")){
      my $line;
      while($line = <VMSTAT>){
        $this->{DATA}->{"raw_blocks_in"} = $1 if($line =~ /^pgpgin\s(\d+)/);
        $this->{DATA}->{"raw_blocks_out"}= $1 if($line =~ /^pgpgout\s(\d+)/);
        $this->{DATA}->{"raw_swap_in"} = $1 if($line =~ /^pswpin\s(\d+)/);
        $this->{DATA}->{"raw_swap_out"} = $1 if($line =~ /^pswpout\s(\d+)/);
      }
      close VMSTAT;
    }else{
      logger("NOTICE", "Procinfo: cannot open /proc/vmstat");
    }
  }
}

# sizes are reported in MB (except _usage that is in percent).
sub readMemInfo {
  # Read memory and swap statistics; sizes in MB, *_usage in percent.
  # NOTE(review): the "<MEM_INFO>"/"<PROC>" readline tokens were lost in an earlier
  # extraction of this file; restored from the matching open()/close() pairs.
  my $this = shift;
  if ($Config{osname} eq "solaris"){
    if (open(MEM_INFO, "prtconf |")){
      my $line;
      while ($line = <MEM_INFO>){
        if ($line =~ /^Memory size: (\d+) (\w)/){
          $this->{DATA}->{"total_mem"} = $1;
          if ($2 eq "G"){
            $this->{DATA}->{"total_mem"} *= 1024; # normalize GB to MB
          }
        }
      }
      close MEM_INFO;
    }
    if (open(MEM_INFO, "vmstat |")){
      my $line;
      # first header line
      $line = <MEM_INFO>;
      # second header line
      $line = <MEM_INFO>;
      # and the contents
      $line = <MEM_INFO>;
      if ($line =~ /^\s*\d+\s+\d+\s+\d+\s+\d+\s+(\d+)/){
        my $memfree = $1 / 1024;
        $this->{DATA}->{"mem_free"} = $memfree;
        $this->{DATA}->{"mem_actual_free"} = $memfree;
        $this->{DATA}->{"mem_used"} = $this->{DATA}->{"total_mem"} - $memfree;
        $this->{DATA}->{"mem_usage"} = $this->{DATA}->{"mem_used"} * 100 / $this->{DATA}->{"total_mem"} if $this->{DATA}->{"total_mem"};
      }
      close MEM_INFO;
    }
    if (open(MEM_INFO, "swap -l |")){
      my $line;
      $line = <MEM_INFO>; # skip the header line
      $this->{DATA}->{"total_swap"} = 0;
      $this->{DATA}->{"swap_free"} = 0;
      while ($line = <MEM_INFO>){
        if ($line =~ /(\d+)\s+(\d+)$/){
          $this->{DATA}->{"total_swap"} += $1 / 2048; # 512-byte blocks -> MB
          $this->{DATA}->{"swap_free"} += $2 / 2048;
        }
      }
      close MEM_INFO; # fixed: handle was previously leaked on this branch
      $this->{DATA}->{"swap_used"} = $this->{DATA}->{"total_swap"} - $this->{DATA}->{"swap_free"};
      $this->{DATA}->{"swap_usage"} = 100.0 * $this->{DATA}->{"swap_used"} / $this->{DATA}->{"total_swap"} if $this->{DATA}->{"total_swap"};
    }
    return;
  }
  if(open(MEM_INFO, "</proc/meminfo")){
    my $line;
    while($line = <MEM_INFO>){
      if($line =~ /^MemFree:/){
        my (undef, $mem_free) = split(/ +/, $line);
        $this->{DATA}->{"mem_free"} = $mem_free / 1024.0;
      }
      if($line =~ /^MemTotal:/){
        my (undef, $mem_total) = split(/ +/, $line);
        $this->{DATA}->{"total_mem"} = $mem_total / 1024.0;
      }
      if($line =~ /^SwapFree:/){
        my (undef, $swap_free) = split(/ +/, $line);
        $this->{DATA}->{"swap_free"} = $swap_free / 1024.0;
      }
      if($line =~ /^SwapTotal:/){
        my (undef, $swap_total) = split(/ +/, $line);
        $this->{DATA}->{"total_swap"} = $swap_total / 1024.0;
      }
      if($line =~ /^Buffers:/){
        my (undef, $buffers) = split(/ +/, $line);
        $this->{DATA}->{"mem_buffers"} = $buffers / 1024.0;
      }
      if($line =~ /^Cached:/){
        my (undef, $cached) = split(/ +/, $line);
        $this->{DATA}->{"mem_cached"} = $cached / 1024.0;
      }
    }
    close MEM_INFO;
    # derived values; each is guarded so a missing raw value doesn't produce garbage
    $this->{DATA}->{"mem_actualfree"} = $this->{DATA}->{"mem_free"} + $this->{DATA}->{"mem_buffers"} + $this->{DATA}->{"mem_cached"}
      if ($this->{DATA}->{"mem_free"} && $this->{DATA}->{"mem_buffers"} && $this->{DATA}->{"mem_cached"});
    $this->{DATA}->{"mem_used"} = $this->{DATA}->{"total_mem"} - $this->{DATA}->{"mem_actualfree"}
      if ($this->{DATA}->{"total_mem"} && $this->{DATA}->{"mem_actualfree"});
    $this->{DATA}->{"swap_used"} = $this->{DATA}->{"total_swap"} - $this->{DATA}->{"swap_free"}
      if $this->{DATA}->{"total_swap"};
    $this->{DATA}->{"mem_usage"} = 100.0 * $this->{DATA}->{"mem_used"} / $this->{DATA}->{"total_mem"}
      if ($this->{DATA}->{"total_mem"} && $this->{DATA}->{"mem_used"});
    $this->{DATA}->{"swap_usage"} = 100.0 * $this->{DATA}->{"swap_used"} / $this->{DATA}->{"total_swap"}
      if $this->{DATA}->{"total_swap"};
  }else{
    logger("NOTICE", "ProcInfo: cannot open /proc/meminfo");
  }
}

# read the number of processes currently running on the system
# count also the number of runnable, sleeping, zombie, io blocked and traced processes
# works on Darwin
sub countProcesses {
  my $this = shift;
  my $total = 0;
  my %states = ('D' => 0, 'R' => 0, 'S' => 0, 'T' => 0, 'Z' => 0);
  my $command = "ps -A -o state |";
  if ($Config{osname} eq "solaris"){
    $command = "ps -A -o s |";
  }
  if(open(PROC, $command)){
    my $state = <PROC>; # ignore the first line - it's the header
    while(<PROC>){
      $state = substr($_, 0, 1);
      $states{$state}++;
      $total++;
    }
    close PROC;
    $this->{DATA}->{"processes"} = $total;
    for $state (keys %states){
      next if (($state eq '') || ($state =~ /\s+/));
      $this->{DATA}->{"processes_$state"} = $states{$state};
    }
  } else {
    logger("NOTICE", "ProcInfo: cannot count the processes using ps.");
  }
}

#Read information about CPU.
sub readCPUInfo {
  # Fill DATA with cpu_MHz, no_CPUs, vendor/family/model, bogomips and cache size.
  # NOTE(review): the "<CPU_INFO>"/"<IF_CFG>" etc. readline tokens and open() mode
  # strings were lost in an earlier extraction of this file; restored here —
  # confirm against upstream ApMon.
  my $this = shift;
  if ($Config{osname} eq "solaris"){
    chomp ($this->{DATA}->{"no_CPUs"} = `psrinfo -p`);
    return;
  }
  if(-r "/proc/cpuinfo"){
    if(open(CPU_INFO, "</proc/cpuinfo")){
      my $no_cpus = 0; # one "cpu MHz" line appears per (logical) CPU
      my $line;
      while($line = <CPU_INFO>){
        if($line =~ /cpu MHz\s+:\s+(\d+\.?\d*)/){
          $this->{DATA}->{"cpu_MHz"} = $1;
          $no_cpus ++;
        }
        if($line =~ /vendor_id\s+:\s+(.+)/ || $line =~ /vendor\s+:\s+(.+)/){
          $this->{DATA}->{"cpu_vendor_id"} = $1;
        }
        if($line =~ /cpu family\s+:\s+(.+)/ || $line =~ /revision\s+:\s+(.+)/){
          $this->{DATA}->{"cpu_family"} = $1;
        }
        if($line =~ /model\s+:\s+(.+)/) {
          $this->{DATA}->{"cpu_model"} = $1;
        }
        if($line =~ /model name\s+:\s+(.+)/ || $line =~ /family\s+:\s+(.+)/){
          $this->{DATA}->{"cpu_model_name"} = $1;
        }
        if($line =~ /bogomips\s+:\s+(\d+\.?\d*)/ || $line =~ /BogoMIPS\s+:\s+(\d+\.?\d*)/){
          $this->{DATA}->{"bogomips"} = $1;
        }
        if($line =~ /cache size\s+:\s+(\d+)/){
          $this->{DATA}->{"cpu_cache"} = $1;
        }
      }
      close CPU_INFO;
      $this->{DATA}->{"no_CPUs"} = $no_cpus;
    }
  }
  # this is for Itanium
  if(-r "/proc/pal/cpu0/cache_info"){
    if(open(CACHE_INFO, "</proc/pal/cpu0/cache_info")){
      my ($line, $level3params);
      while($line = <CACHE_INFO>){
        $level3params = 1 if ($line =~ /Cache level 3/);
        $this->{DATA}->{"cpu_cache"} = $1 / 1024 if ($level3params && $line =~ /Size\s+:\s+(\d+)/);
      }
      close(CACHE_INFO);
    }
  }
  # also put the ksi2k factor, if known
  $this->{DATA}->{"ksi2k_factor"} = $ApMon::Common::KSI2K if $ApMon::Common::KSI2K;
}

# reads the IP, hostname, cpu_MHz, kernel_version, os_version, platform
sub readGenericInfo {
  my $this = shift;
  my $hostname = Net::Domain::hostfqdn();
  $this->{DATA}->{"hostname"} = $hostname;
  if ($Config{osname} eq "solaris"){
    chomp ($this->{DATA}->{"os_type"} = `uname -sr`);
    $this->{DATA}->{"platform"} = "solaris";
    $this->{DATA}->{"kernel_version"} = $Config{osvers};
    if (open(IF_CFG, "ifconfig -a4 |")){
      my ($eth, $ip, $line);
      while ($line = <IF_CFG>){
        if ($line =~ /^(\w+\d):/){
          $eth = $1;
        }
        if (defined($eth) and ($line =~ /\s+inet\s+(\d+\.\d+\.\d+\.\d+)/)){
          $ip = $1;
          next if ($eth =~ /^lo/);
          $this->{DATA}->{$eth."_ip"} = $ip;
          # fake eth0 on solaris
          $this->{DATA}->{"eth0_ip"} = $ip unless $this->{DATA}->{"eth0_ip"};
        }
      }
      close IF_CFG; # fixed: handle was previously leaked on this branch
    }
    return;
  }
  if(open(IF_CFG, "/sbin/ifconfig -a |")){
    my ($eth, $ip, $ipv6, $line);
    while($line = <IF_CFG>){
      if($line =~ /^(\w+):?\s+/ ){
        # start of a new interface section; only track known physical interfaces
        undef $ip;
        if (exists($this->{NETWORKINTERFACES}->{$1})){
          $eth = $1;
          undef $ip;
          undef $ipv6;
        } else {
          undef $eth;
        }
        next;
      }
      if ($line =~ /^\w/){
        undef $eth;
        undef $ip;
        undef $ipv6;
        next;
      }
      if(defined($eth) and ($line =~ /\s+inet( addr:)?\s*(\d+\.\d+\.\d+\.\d+)/) and ! defined($ip)){
        $ip = $2;
        $this->{DATA}->{$eth."_ip"} = $ip;
        undef $ipv6;
      }
      if(defined($eth) and ($line =~ /\s+inet6( addr:)?\s*([0-9a-fA-F:]+).*(Scope:Global|scopeid.*global)/) and ! defined($ipv6)){
        $ipv6 = $2;
        $this->{DATA}->{$eth."_ipv6"} = $ipv6;
      }
    }
    close IF_CFG;
  }else{
    logger("NOTICE", "ProcInfo: couldn't get output from /sbin/ifconfig -a");
  }
  # determine the kernel version
  my $line = `uname -r`;
  chomp $line;
  $this->{DATA}->{"kernel_version"} = $line;
  # determine the platform
  $line = `uname -m 2>/dev/null || uname`;
  chomp $line;
  $this->{DATA}->{"platform"} = $line;
  # try to determine the OS type
  my $osType = "";
  if(open(LSB_RELEASE, 'env PATH=$PATH:/bin:/usr/bin lsb_release -d 2>/dev/null |')){
    my $line = <LSB_RELEASE>;
    $osType = $1 if ($line && $line =~ /Description:\s*(.*)/);
    close LSB_RELEASE;
  }
  if(! $osType){
    for my $f ("/etc/redhat-release", "/etc/debian_version", "/etc/SuSE-release", "/etc/slackware-version",
               "/etc/gentoo-release", "/etc/mandrake-release", "/etc/mandriva-release", "/etc/issue"){
      if(open(VERF, "$f")){
        $osType = <VERF>;
        chomp $osType;
        close VERF;
        last;
      }
    }
  }
  if(! $osType){
    $osType = `uname -s`;
    chomp $osType;
  }
  $this->{DATA}->{"os_type"} = $osType;
}

# read system's uptime and load average. Time is reported as a floating number, in days.
# It uses the 'uptime' command which's output looks like these:
#  19:55:37 up 11 days, 18:57,  1 user,  load average: 0.00, 0.00, 0.00
#  18:42:31 up 87 days, 18:10,  9 users,  load average: 0.64, 0.84, 0.80
#  6:42pm  up 7 days  3:08,  7 users,  load average: 0.18, 0.14, 0.10
#  6:42pm  up 33 day(s),  1:54,  1 user,  load average: 0.01, 0.00, 0.00
#  18:42  up 7 days,  3:45, 2 users, load averages: 1.10 1.11 1.06
#  18:47:41  up 7 days,  4:35, 19 users,  load average: 0.66, 0.44, 0.41
#  15:10  up 8 days, 12 mins, 2 users, load averages: 1.46 1.27 1.18
#  11:57am  up   2:21,  22 users,  load average: 0.59, 0.93, 0.73
sub readUptimeAndLoadAvg {
  my $this = shift;
  my $line = `uptime`;
  chomp $line;
  if($line =~ /up\s+((\d+)\s+day[ (s),]+)?(\d+)(:(\d+))?[^\d]+(\d+)[^\d]+([\d\.]+)[^\d]+([\d\.]+)[^\d]+([\d\.]+)/){
    my ($days, $hour, $min, $users, $load1, $load5, $load15) = ($2, $3, $5, $6, $7, $8, $9);
    if(! $min){
      # "up 12 mins" case: the first captured number was actually minutes
      $min = $hour;
      $hour = 0;
    }
    $days = 0 if ! $days;
    my $uptime = $days + $hour / 24.0 + $min / 1440.0; # uptime in days
    $this->{DATA}->{"uptime"} = $uptime;
    $this->{DATA}->{"logged_users"} = $users; # this is currently not reported!
    $this->{DATA}->{"load1"} = $load1;
    $this->{DATA}->{"load5"} = $load5;
    $this->{DATA}->{"load15"}= $load15;
  } else {
    logger("NOTICE", "ProcInfo: got unparsable output from uptime: $line");
  }
}

# Report total/used/free/usage of the disk partitions matching APMON_STORAGEPATH
# (default "data"); sizes in TB, usage in percent.
# NOTE(review): the "<IN>" readline tokens below were lost in an earlier extraction
# of this file; restored from the matching open()/close() pairs.
sub readEosDiskValues {
  my $this = shift;
  my $storagepath=$ENV{"APMON_STORAGEPATH"};
  if ( "$storagepath" eq "" ) {
    $storagepath = "data";
  }
  if (open IN, "df -P -B 1 | grep $storagepath | grep -v Filesystem | awk '{a+=\$2;b+=\$3;c+=\$4;print a,b,c}' | tail -1|") {
    my $all = <IN>;
    if ($all) {
      my @vals = split (" ",$all);
      $this->{DATA}->{"eos_disk_space"} = sprintf "%.03f",$vals[0]/1024.0/1024.0/1024.0/1024.0;
      $this->{DATA}->{"eos_disk_used"}  = sprintf "%.03f",$vals[1]/1024.0/1024.0/1024.0/1024.0;
      $this->{DATA}->{"eos_disk_free"}  = sprintf "%.03f",$vals[2]/1024.0/1024.0/1024.0/1024.0;
      $this->{DATA}->{"eos_disk_usage"} = sprintf "%d",100.0 *$vals[1]/$vals[0];
    }
    close(IN);
  }
}

# Report the installed eos-xrootd and eos-server RPM versions.
sub readEosRpmValues {
  my $this = shift;
  if (open IN, "rpm -qa eos-xrootd | cut -d '-' -f3 |") {
    my $all = <IN>;
    if ($all) {
      chomp $all;
      $all =~ s/xrootd-//;
      $this->{DATA}->{"xrootd_rpm_version"} = 'v'.$all;
    }
    close(IN);
  }
  if (open IN, "rpm -qa eos-server |") {
    my $all = <IN>;
    if ($all) {
      chomp $all;
      $all =~ s/eos-server-//;
      $this->{DATA}->{"eos_rpm_version"} = $all;
    }
    close(IN);
  }
}

# Debug helper: print the current Perl call stack (up to 30 frames).
sub show_call_stack {
  my ( $path, $line, $subr );
  my $max_depth = 30;
  my $i = 1;
  while ( (my @call_details = (caller($i++))) && ($i<$max_depth) ) {
    print "$call_details[1] line $call_details[2] in function $call_details[3]\n";
  }
}

# do a difference with overflow check and repair
# the counter is unsigned 32 or 64 bit
sub diffWithOverflowCheck {
  my ($this, $new, $old) = @_;
  if($new >= $old){
    return $new - $old;
  } else {
    # counter wrapped; best effort is to report the new absolute value
    return $new;
  }
}

# read network information like transfered kBps and nr.
# of errors on each interface
# TODO: find an alternative for MAC OS X
# NOTE(review): the "<NET_DEV>"/"<NETSTAT>"/"<PIDS>" readline tokens and the
# /proc/net/dev open() mode string were lost in an earlier extraction of this
# file; restored from the matching open()/close() pairs.
sub readNetworkInfo {
  my $this = shift;
  $this->{NETWORKINTERFACES} = {};
  if ($Config{osname} eq "solaris"){
    my $ifname;
    my $line;
    if (open(NET_DEV, "ifconfig -a4 |")){
      while ($line = <NET_DEV>){
        next if ($ifname); # keep only the first non-loopback interface
        if ($line =~ /^(\w+\d):\s+/){
          next if ($line =~ /^lo/);
          $ifname = $1;
        }
      }
      close NET_DEV;
    }
    my $bytesIn = 0;
    my $bytesOut = 0;
    if (open(NET_DEV,"netstat -P tcp -s |")){
      while ($line = <NET_DEV>){
        if ($line =~ /tcpOut\w+Bytes\s*=\s*(\d+)/){ $bytesOut += $1; }
        if ($line =~ /tcpRetransBytes\s*=\s*(\d+)/){ $bytesOut += $1; }
        if ($line =~ /tcpIn\w+Bytes\s*=\s*(\d+)/){ $bytesIn += $1; }
      }
      close NET_DEV;
      $this->{DATA}->{"raw_net_".$ifname."_in"} = $bytesIn;
      $this->{DATA}->{"raw_net_".$ifname."_out"} = $bytesOut;
      $this->{DATA}->{"raw_net_".$ifname."_err"} = 0;
      #fake eth0 traffic, even if on Solaris the interfaces have weird names
      #and moreover we cannot tell the traffic per each interface...
      $this->{DATA}->{"raw_net_eth0_in"} = $bytesIn;
      $this->{DATA}->{"raw_net_eth0_out"} = $bytesOut;
      $this->{DATA}->{"raw_net_eth0_err"} = 0;
      $this->{DATA}->{"raw_net_total_traffic_in"} = $bytesIn;
      $this->{DATA}->{"raw_net_total_traffic_out"} = $bytesOut;
      $this->{NETWORKINTERFACES}->{"eth0"} = "eth0";
      $this->{NETWORKINTERFACES}->{"total_traffic"} = "total_traffic";
    }
    return;
  }
  # collect the physical (non-virtual) interfaces from sysfs
  if (opendir my $dh, "/sys/class/net"){
    my @things = grep {$_ ne '.' and $_ ne '..' } readdir $dh;
    foreach my $thing (@things) {
      my $link = readlink("/sys/class/net/".$thing);
      if (defined($link) && index($link, "/virtual/")<0){
        $this->{NETWORKINTERFACES}->{$thing} = $thing;
      }
    }
    closedir $dh; # fixed: directory handle was previously leaked
  }
  my $total_traffic_in=0;
  my $total_traffic_out=0;
  if(open(NET_DEV, "</proc/net/dev")){
    my $line;
    while ($line = <NET_DEV>) {
      if($line =~ /\s*(\w+):\s*(\d+)\s+\d+\s+(\d+)\s+\d+\s+\d+\s+\d+\s+\d+\s+\d+\s+(\d+)\s+\d+\s+(\d+)/){
        if ( exists($this->{NETWORKINTERFACES}->{$1}) ){
          $this->{DATA}->{"raw_net_$1"."_in"} = $2;
          $this->{DATA}->{"raw_net_$1"."_out"} = $4;
          $this->{DATA}->{"raw_net_$1"."_errs"} = $3 + $5; # in and out errors
          $total_traffic_in += $2;
          $total_traffic_out += $4;
        }
      }
    }
    close NET_DEV;
  }else{
    logger("NOTICE", "ProcInfo: cannot open /proc/net/dev");
  }
  $this->{DATA}->{"raw_net_total_traffic_in" } = $total_traffic_in;
  $this->{DATA}->{"raw_net_total_traffic_out"} = $total_traffic_out;
  $this->{NETWORKINTERFACES}->{"total_traffic"} = "total_traffic";
}

# run nestat
# Note: this works on MAC OS X
sub readNetStat {
  my $this = shift;
  if(open(NETSTAT, 'env PATH=$PATH:/usr/sbin netstat -an 2>/dev/null |')){
    my %sockets = map { +"sockets_$_" => 0 } ('tcp', 'udp', 'unix'); #icm will be auto added on mac
    my %tcp_details = map { +"sockets_tcp_$_" => 0 }
      ('ESTABLISHED', 'SYN_SENT', 'SYN_RECV', 'FIN_WAIT1', 'FIN_WAIT2', 'TIME_WAIT',
       'CLOSED', 'CLOSE_WAIT', 'LAST_ACK', 'LISTEN', 'CLOSING', 'UNKNOWN');
    if ($Config{osname} eq "solaris"){
      my $sockclass;
      while (my $line = <NETSTAT>){
        # each protocol section starts with a banner followed by two header lines
        if ($line =~ /^UDP:/){ $sockclass = "udp"; $line = <NETSTAT>; $line = <NETSTAT>; next; }
        if ($line =~ /^TCP:/){ $sockclass = "tcp"; $line = <NETSTAT>; $line = <NETSTAT>; next; }
        if ($line =~ /^SCTP:/){ $sockclass = "sctp"; $line = <NETSTAT>; $line = <NETSTAT>; next; }
        if ($line =~ /^Active UNIX domain sockets/){ $sockclass = "unix"; $line = <NETSTAT>; next; }
        chomp ($line);
        if (length($line) == 0){
          undef $sockclass;
          next;
        }
        if (defined($sockclass)){
          if ($sockclass eq "tcp"){
            if ($line =~ /\s+(\w+)\s*$/){
              $sockets{"sockets_tcp"}++;
              my $state = uc($1);
              if (not defined($tcp_details{"sockets_tcp_".$state})){
                $tcp_details{"sockets_tcp_".$state} = 0;
              }
              $tcp_details{"sockets_tcp_".$state}++;
            }
          } else {
            if (not defined($sockets{"sockets_$sockclass"})){
              $sockets{"sockets_$sockclass"} = 0;
            }
            $sockets{"sockets_$sockclass"}++;
          }
        }
      }
    } else {
      while (my $line = <NETSTAT>) {
        $line =~ s/\s+$//;
        my $proto = ($line =~ /^([^\s]+)/ ? $1 : "");
        my $state = ($line =~ /([^\s]+)$/ ? $1 : "");
        $proto = "unix" if $line =~ /stream/i || $line =~ /dgram/i;
        if($proto =~ /tcp/){
          $sockets{"sockets_tcp"}++;
          $tcp_details{"sockets_tcp_".$state}++;
        }elsif($proto =~ /udp/){
          $sockets{"sockets_udp"}++;
        }elsif($proto =~ /icm/){
          $sockets{"sockets_icm"}++;
        }elsif($proto =~ /unix/){
          $sockets{"sockets_unix"}++;
        }
      }
    }
    close NETSTAT;
    while(my ($key, $value) = each(%sockets)){
      $this->{DATA}->{$key} = $value;
    }
    while(my ($key, $value) = each(%tcp_details)){
      $this->{DATA}->{$key} = $value;
    }
  } else {
    logger("NOTICE", "ProcInfo: cannot run netstat");
  }
}

# internal function that gets the full list of children (pids) for a process (pid)
# it returns an empty list if the process has died
# Note: This works on MAC OS X
sub getChildren {
  my ($this, $parent) = @_;
  my @children = ();
  my %pidmap = ();
  if(open(PIDS, 'ps -A -o "pid ppid" |')){
    $_ = <PIDS>; # skip header
    while(<PIDS>){
      if(/\s*(\d+)\s+(\d+)/){
        $pidmap{$1} = $2;
        push(@children, $parent) if $1 == $parent;
      }
    }
    close(PIDS);
  }else{
    logger("NOTICE", "ProcInfo: cannot execute ps -A -o \"pid ppid\"");
  }
  # breadth-first expansion: append the children of every pid already collected
  for(my $i = 0; $i < @children; $i++){
    my $prnt = $children[$i];
    while( my ($pid, $ppid) = each %pidmap ){
      if($ppid == $prnt){
        push(@children, $pid);
      }
    }
  }
  return @children;
}

# internal function that parses a time formatted like "days-hours:min:sec" and returns the corresponding
sub parsePSElapsedTime { my ($this, $time) = @_; if($time =~ /(\d+)-(\d+):(\d+):(\d+)/){ return $1 * 24 * 3600 + $2 * 3600 + $3 * 60 + $4; }elsif($time =~ /(\d+):(\d+):(\d+)/){ return $1 * 3600 + $2 * 60 + $3; }elsif($time =~ /(\d+):(\d+)/){ return $1 * 60 + $2; }else{ return 0; } } # internal function that parses time formatted like "Tue Feb 7 17:13:17 2006" and the returns the # corresponding number of seconds from EPOCH sub parsePSStartTime { my ($this, $strTime) = @_; if($strTime !~ /\S+\s+(\S+)\s+(\d+)\s+(\d+):(\d+):(\d+)\s+(\d+)/){ return 0; }else{ my ($strMonth, $mday, $hour, $min, $sec, $year) = ($1, $2, $3, $4, $5, $6); my $mon = 0; for my $month (@{$this->{MONTHS}}){ last if $month eq $strMonth; $mon++; } return timelocal($sec, $min, $hour, $mday, $mon, $year); } } # read information about this the JOB_PID process # memory sizes are given in KB # Note: This works on MAC OS X sub readJobInfo { my ($this, $pid) = @_; return unless $pid; my @children = $this->getChildren($pid); logger("DEBUG", "ProcInfo: Children for pid=$pid; are @children."); if(@children == 0){ logger("INFO", "ProcInfo: Job with pid=$pid terminated; removing it from monitored jobs."); $this->removeJobToMonitor($pid); return; } if(open(J_STATUS, 'ps -A -o "pid lstart time %cpu %mem rsz vsz command" |')){ my $line = ; # skip header my ($etime, $cputime, $pcpu, $pmem, $rsz, $vsz, $comm, $fd, $minflt, $majflt) = (0, 0, 0, 0, 0, 0, 0, undef, 0, 0); my $cputime_offset = $this->{JOBS}->{$pid}->{DATA}->{'cpu_time_offset'} || 0; my %mem_cmd_map = (); # this contains all $rsz_$vsz_$command as keys for every pid # it is used to avoid adding several times processes that have multiple threads and appear in # ps as sepparate processes, occupying exacly the same amount of memory. The reason for not adding # them multiple times is that that memory is shared as they are threads. 
my $crtTime = time();
# Walk the `ps` output (J_STATUS pipe opened above) and aggregate the
# resource usage of all child processes of the monitored job.
# NOTE(review): the readline operators below (<J_STATUS>, <STAT>) were lost
# in extraction; restored from the matching open()/close() handles.
while($line = <J_STATUS>){
  chomp $line;
  $line =~ s/\s+/ /g; $line =~ s/^\s+//; $line =~ s/\s+$//;
  # line looks like:
  # "PID STARTED TIME %CPU %MEM RSZ VSZ COMMAND"
  # "6157 Tue Feb 7 22:15:30 2006 00:00:00 0.0 0.0 428 1452 g++ -O -pipe..."
  if($line =~ /(\S+) (\S+ \S+ \S+ \S+ \S+) (\S+) (\S+) (\S+) (\S+) (\S+) (.+)/){
    my($apid, $stime1, $cputime1, $pcpu1, $pmem1, $rsz1, $vsz1, $comm1) =
      ($1, $2, $3, abs($4), abs($5), $6, $7, $8); # % can be negative on mac!?!
    # only account processes that belong to the monitored job's process tree
    my $isChild = 0;
    for my $childPid (@children){
      if($apid == $childPid){
        $isChild = 1;
        last;
      }
    }
    next if(! $isChild);
    my $sec = $crtTime - $this->parsePSStartTime($stime1);
    $etime = $sec if $sec > $etime;              # the elapsed time is the maximum of all elapsed
    $sec = $this->parsePSElapsedTime($cputime1); # times corresponding to all child processes.
    $cputime += $sec; # total cputime is the sum of cputimes for all processes.
    $pcpu += $pcpu1;  # total %cpu is the sum of all children %cpu.
    if(! $mem_cmd_map{"$pmem1 $rsz1 $vsz1 $comm1"} ++){
      # it's the first thread/process with this memory footprint; add it.
      $pmem += $pmem1; $rsz += $rsz1; $vsz += $vsz1;
      # the same is true for the number of opened files
      my $thisFD = $this->countOpenFD($apid);
      $fd += $thisFD if (defined $thisFD);
    } # else not adding memory usage.
    # Get the number of minor and major page faults from /proc/<pid>/stat
    if(open(STAT, "/proc/$apid/stat")){
      my $line = <STAT>;
      my($pid, $exec, $status, $ppid, $pgrp, $sid, $tty, $tty_grp, $flags,
         $mflt, $cmflt, $jflt, $cjflt) = split(/\s+/, $line);
      $minflt += $mflt;
      $majflt += $jflt;
    }
    close(STAT);
  }
}
close(J_STATUS);
$cputime += $cputime_offset;
my $cputime_delta = ($this->{JOBS}->{$pid}->{DATA}->{'cpu_time'} || 0) - $cputime; # note this is the other way around!
if($cputime_delta > 0){
  # Current time is lower than previous - one of the forked processes finished and
  # its contribution to the cpu_time disappeared.
  # We have to recalculate the cputime_offset.
  # Note that in this case, we lose the
  # cpu_time of the other processes, consumed between these two reports.
  $cputime_offset += $cputime_delta;
  $cputime += $cputime_delta;
}
$cputime_delta = $cputime - ($this->{JOBS}->{$pid}->{DATA}->{'cpu_time'} || 0); # real cpu time delta
my $etime_delta = $etime - ($this->{JOBS}->{$pid}->{DATA}->{'run_time'} || 0);  # real elapsed time delta
my $crtCpuSpeed = $this->{DATA}->{'cpu_MHz'} || 1;
my $orgCpuSpeed = $ApMon::Common::CpuMHz || $crtCpuSpeed;
#my $freqFact = $crtCpuSpeed / $orgCpuSpeed; # if Cpu speed varies in time, adjust ksi2k factor
my $freqFact = 1;
$this->{JOBS}->{$pid}->{DATA}->{'run_time'} += $etime_delta;
$this->{JOBS}->{$pid}->{DATA}->{'run_ksi2k'} += $etime_delta * $freqFact * $ApMon::Common::KSI2K if $ApMon::Common::KSI2K;
$this->{JOBS}->{$pid}->{DATA}->{'cpu_time'} += $cputime_delta;
$this->{JOBS}->{$pid}->{DATA}->{'cpu_ksi2k'} += $cputime_delta * $freqFact * $ApMon::Common::KSI2K if $ApMon::Common::KSI2K;
$this->{JOBS}->{$pid}->{DATA}->{'cpu_time_offset'} = $cputime_offset;
$this->{JOBS}->{$pid}->{DATA}->{'cpu_usage'} = $pcpu;
$this->{JOBS}->{$pid}->{DATA}->{'mem_usage'} = $pmem;
$this->{JOBS}->{$pid}->{DATA}->{'rss'} = $rsz;
$this->{JOBS}->{$pid}->{DATA}->{'virtualmem'} = $vsz;
$this->{JOBS}->{$pid}->{DATA}->{'open_files'} = $fd if (defined $fd);
$this->{JOBS}->{$pid}->{DATA}->{'page_faults_min'} = $minflt;
$this->{JOBS}->{$pid}->{DATA}->{'page_faults_maj'} = $majflt;
}else{
  logger("NOTICE", "ProcInfo: cannot run ps to see job's status for job $pid");
}
}

# Count the number of open files for the given pid by listing /proc/<pid>/fd.
# Returns undef on Solaris (no /proc/<pid>/fd) or if the directory is unreadable.
# TODO: find an equivalent for MAC OS X
sub countOpenFD {
  my ($this, $pid) = @_;
  if ($Config{osname} eq "solaris"){
    return undef;
  }
  if(opendir(DIR, "/proc/$pid/fd")){
    my @list = readdir(DIR);
    closedir DIR;
    # for ourselves, subtract ".", ".." and the two fds used by opendir/readdir
    my $open_files = ($pid == $$ ?
      @list - 4 : @list - 2);
    logger("DEBUG", "Counting open_files for $pid: |@list| => $open_files");
    return $open_files;
  }else{
    logger("NOTICE", "ProcInfo: cannot count the number of opened files for job $pid");
  }
  return undef;
}

# if there is a work directory defined, then compute the used space in that directory
# and the free disk space on the partition to which that directory belongs
# sizes are given in MB
# Note: this works on MAC OS X
sub readJobDiskUsage {
  my ($this, $pid) = @_;
  my $workDir = $this->{JOBS}->{$pid}->{WORKDIR};
  return unless $workDir and -d $workDir;
  if(open(DU, "du -Lsck $workDir | tail -1 | cut -f 1 |")){
    my $line = <DU>;
    if($line){
      chomp $line;
      $this->{JOBS}->{$pid}->{DATA}->{'workdir_size'} = $line / 1024.0;
    }else{
      logger("NOTICE", "ProcInfo: cannot get du output for job $pid");
    }
    close(DU);
  }else{
    logger("NOTICE", "ProcInfo: cannot run du to get job's disk usage for job $pid");
  }
  if(open(DF, "df -k $workDir | tail -1 |")){
    my $line = <DF>;
    if($line){
      chomp $line;
      if($line =~ /\S+\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)%/){
        $this->{JOBS}->{$pid}->{DATA}->{'disk_total'} = $1 / 1024.0;
        $this->{JOBS}->{$pid}->{DATA}->{'disk_used'}  = $2 / 1024.0;
        $this->{JOBS}->{$pid}->{DATA}->{'disk_free'}  = $3 / 1024.0;
        $this->{JOBS}->{$pid}->{DATA}->{'disk_usage'} = $4;
      }
    }else{
      logger("NOTICE", "ProcInfo: cannot get df output for job $pid");
    }
    close(DF);
  }else{
    logger("NOTICE", "ProcInfo: cannot run df to get job's disk usage for job $pid");
  }
}

# create cummulative parameters based on raw params like cpu_, blocks_, swap_, or ethX_
sub computeCummulativeParams {
  my ($this, $dataRef, $prevDataRef) = @_;
  # first report: just snapshot the raw_ values and the timestamp
  if(scalar(keys %$prevDataRef) == 0){
    for my $param (keys %$dataRef){
      next if $param !~ /^raw_/;
      $prevDataRef->{$param} = $dataRef->{$param};
    }
    $prevDataRef->{'TIME'} = $dataRef->{'TIME'};
    return;
  }
  # cpu -related params
  if(defined($dataRef->{'raw_cpu_usr'}) && defined($prevDataRef->{'raw_cpu_usr'})){
    my %diff = ();
    my $cpu_sum = 0;
    for my $param ('cpu_usr', 'cpu_nice',
'cpu_sys', 'cpu_idle', 'cpu_iowait', 'cpu_irq', 'cpu_softirq', 'cpu_steal', 'cpu_guest') { if (defined($dataRef->{"raw_$param"}) && defined($prevDataRef->{"raw_$param"})){ $diff{$param} = $this->diffWithOverflowCheck($dataRef->{"raw_$param"}, $prevDataRef->{"raw_$param"}); $cpu_sum += $diff{$param}; } } for my $param ('cpu_usr', 'cpu_nice', 'cpu_sys', 'cpu_idle', 'cpu_iowait', 'cpu_irq', 'cpu_softirq', 'cpu_steal', 'cpu_guest') { if (defined($dataRef->{"raw_$param"}) && defined($prevDataRef->{"raw_$param"})){ if($cpu_sum != 0){ $dataRef->{$param} = 100.0 * $diff{$param} / $cpu_sum; }else{ delete $dataRef->{$param}; } } } if($cpu_sum != 0){ $dataRef->{'cpu_usage'} = 100.0 * ($cpu_sum - $diff{'cpu_idle'}) / $cpu_sum; }else{ delete $dataRef->{'cpu_usage'}; } # add the other parameters for my $param ('interrupts', 'context_switches'){ if(defined($dataRef->{"raw_$param"}) && defined($prevDataRef->{"raw_$param"})){ $dataRef->{$param} = $this->diffWithOverflowCheck($dataRef->{"raw_$param"}, $prevDataRef->{"raw_$param"}); } } } # interrupts, context switches, swap & blocks - related params my $interval = $dataRef->{TIME} - $prevDataRef->{TIME}; for my $param ('blocks_in', 'blocks_out', 'swap_in', 'swap_out', 'interrupts', 'context_switches') { if(defined($dataRef->{"raw_$param"}) && defined($prevDataRef->{"raw_$param"}) && ($interval != 0)){ my $diff = $this->diffWithOverflowCheck($dataRef->{"raw_$param"}, $prevDataRef->{"raw_$param"}); $dataRef->{$param."_R"} = $diff / $interval; }else{ delete $dataRef->{$param."_R"}; } } # physical network interfaces - related params for my $rawParam (keys %$dataRef){ next if $rawParam !~ /^raw_net_/; next if ! 
defined($prevDataRef->{$rawParam}); my $param = $1 if($rawParam =~ /raw_net_(.*)/); if($interval != 0){ $dataRef->{$param} = $this->diffWithOverflowCheck($dataRef->{$rawParam}, $prevDataRef->{$rawParam}); # absolute difference $dataRef->{$param} = $dataRef->{$param} / $interval / 1024.0 if($param !~ /_errs$/); # if it's _in or _out, compute in KB/sec }else{ delete $dataRef->{$param}; } } # copy contents of the current data values to the for my $param (keys %$dataRef){ next if $param !~ /^raw_/; $prevDataRef->{$param} = $dataRef->{$param}; } $prevDataRef->{'TIME'} = $dataRef->{'TIME'}; } # Return the array image of a hash with the requested parameters (from paramsRef) # sorted alphabetically # The cummulative parameters are computed based on $prevDataRef # As a side effect, prevDataRef is updated to have the values in dataRef. sub getFilteredData { my ($this, $dataRef, $paramsRef, $prevDataRef, $networkInterfaces) = @_; # we don't do this for jobs $this->computeCummulativeParams($dataRef, $prevDataRef) if($prevDataRef); my %result = (); for my $param (@$paramsRef) { if($param eq "net_sockets"){ for my $key (keys %$dataRef) { $result{$key} = $dataRef->{$key} if $key =~ /sockets_[^_]+$/; } }elsif($param eq "net_tcp_details"){ for my $key (keys %$dataRef) { $result{$key} = $dataRef->{$key} if $key =~ /sockets_tcp_/; } }elsif($param =~ /^net_(.*)$/ or $param =~ /^(ip)$/){ my $net_param = $1; for my $key (keys %$dataRef) { if ($key =~ /^(\w+)_$net_param/ ){ if ( exists ($networkInterfaces->{$1}) ){ $result{$key} = $dataRef->{$key}; } } } }elsif($param eq "processes"){ for my $key (keys %$dataRef) { $result{$key} = $dataRef->{$key} if $key =~ /^processes/; } }elsif($param =~ /blocks_|swap_|interrupts|context_switches/){ for my $key (keys %$dataRef) { $result{$key} = $dataRef->{$key} if $key =~ /^${param}_R$/; } $result{$param} = $dataRef->{$param} if($param =~/^swap_/ && defined($dataRef->{$param})); } else{ $result{$param} = $dataRef->{$param} if defined 
$dataRef->{$param};
    }
  }
  # flatten the result hash into a (key, value, key, value, ...) list,
  # sorted alphabetically by key
  my @sorted_result = ();
  for my $key (sort (keys %result)) {
    push(@sorted_result, $key, $result{$key});
  }
  return @sorted_result;
}

######################################################################################
# Interesting functions for other modules:

# This should be called from time to time to update the monitored data,
# but not more often than once a second because of the resolution of time()
sub update {
  my $this = shift;
  logger("NOTICE", "ProcInfo: Collecting backgound and ".keys(%{$this->{JOBS}})." PIDs monitoring info.");
  $this->readStat();
  $this->readMemInfo();
  $this->readUptimeAndLoadAvg();
  $this->countProcesses();
  $this->readNetworkInfo();
  $this->readNetStat();
  $this->readEosDiskValues();
  $this->readEosRpmValues();
  $this->{DATA}->{TIME} = time;
  # generic info is cheap to skip; refresh it only every other update
  $this->readGenericInfo() if (($this->{readGI}++) % 2 == 0);
  $this->readCPUInfo();
  for my $pid (keys %{$this->{JOBS}}) {
    $this->readJobInfo($pid);
    $this->readJobDiskUsage($pid);
  }
}

# Call this to add another PID to be monitored
sub addJobToMonitor {
  my ($this, $pid, $workDir) = @_;
  $this->{JOBS}->{$pid}->{WORKDIR} = $workDir;
  $this->{JOBS}->{$pid}->{DATA} = {};
}

# Call this to stop monitoring a PID
sub removeJobToMonitor {
  my ($this, $pid) = @_;
  delete $this->{JOBS}->{$pid};
}

# Return a filtered hash containing the system-related parameters and values
sub getSystemData {
  my ($this, $paramsRef, $prevDataRef) = @_;
  my @ret = $this->getFilteredData($this->{DATA}, $paramsRef, $prevDataRef, $this->{NETWORKINTERFACES});
  #print Dumper(@ret);
  return @ret;
}

# Return a filtered hash containing the job-related parameters and values.
# FIX: pass undef for $prevDataRef so that NETWORKINTERFACES lands in the
# $networkInterfaces slot of getFilteredData; previously it was passed as
# $prevDataRef, wrongly triggering computeCummulativeParams for jobs
# (getFilteredData itself notes "we don't do this for jobs").
sub getJobData {
  my ($this, $pid, $paramsRef) = @_;
  return $this->getFilteredData($this->{JOBS}->{$pid}->{DATA}, $paramsRef, undef, $this->{NETWORKINTERFACES});
}

1;

================================================
FILE: ApMon/perl/ApMon/ApMon/XDRUtils.pm
================================================
package ApMon::XDRUtils;

use strict;
use warnings;

# XDR type tags used in the ApMon datagram encoding
my $XDR_STRING = 0;
my $XDR_INT32 = 2;
my $XDR_REAL64 = 5;
my $MAX_INT = 1<<31;

# Encode a set of parameters in the following format:
# |clusterName | nodeName | time | #params |
# | paramName | paramType | paramValue| x #params
# and time, if != -1
sub encodeParameters {
  my ($clusterName, $nodeName, $time, @params) = @_;
  my $encParams = "";
  for(my $i = 0; $i < $#params; $i += 2){
    $encParams .= encodeParameter($params[$i], $params[$i+1]);
  }
  my $encTime = $time == -1 ? "" : encodeINT32($time);
  return encodeString($clusterName) . encodeString($nodeName) . encodeINT32(@params/2) . $encParams . $encTime;
}

# Encode a parameter pair (paramName, paramValue); the value type is
# auto-detected with getType().
sub encodeParameter {
  my ($name, $value) = @_;
  my $type = getType($value);
  my $encValue;
  if ($type == $XDR_INT32) {
    $encValue = encodeINT32($value);
  } elsif ($type == $XDR_REAL64) {
    $encValue = encodeREAL64($value);
  } else {
    $encValue = encodeString($value);
  }
  return encodeString($name).encodeINT32($type).$encValue;
}

# Return the type for a given value (XDR_INT32, XDR_REAL64, XDR_STRING)
sub getType {
  $_ = shift;
  # integers within 32-bit range; larger ones fall through to REAL64
  return $XDR_INT32 if(/^[+-]?\d+$/ && (abs($_) < $MAX_INT));
  return $XDR_REAL64 if /^([+-]?)(?=\d|\.\d)\d*(\.\d*)?([Ee]([+-]?\d+))?$/;
  return $XDR_STRING;
}

# Encode a string in XDR format: 4-byte length followed by the string
# NUL-padded to a multiple of 4 bytes.
sub encodeString {
  my $str = shift;
  my $enc = encodeINT32(length($str));
  while (length($str) % 4 != 0){
    $str .= "\0";
  }
  return $enc.$str;
}

# Encode a 32 bit signed integer in XDR format (big-endian)
sub encodeINT32 {
  my $val = shift;
  return pack("N", int($val));
}

# Encode a 64 bit double in XDR format (big-endian byte order)
sub encodeREAL64 {
  my $val = shift;
  my $end = verifyEndian();
  if ($end == 0) {
    return reverse(pack("d",$val));
  } else {
    return pack("d",$val);
  }
}

# Verify if machine is big-endian or little-endian:
# returns 0 for little-endian, 1 for big-endian
# (undef on exotic mixed-endian layouts).
sub verifyEndian {
  my $foo = pack("s2",1,2);
  if ($foo eq "\1\0\2\0" ) {
    return 0;
  } elsif ( $foo eq "\0\1\0\2" ) {
    return 1;
  }
}

1;

================================================
FILE: ApMon/perl/ApMon/ApMon.pm
================================================

=head1 NAME

ApMon - Perl extension for sending application information to MonALISA services.

=head1 SYNOPSIS

  use ApMon;

  # initialize from a URL or from a file
  my $apm = new ApMon::ApMon("http://some.host.com/destinations.conf");
  $apm->sendParameters("Cluster", "Node", "param1", 14.23e-10, "param2", 234);

  # initialize with default xApMon configuration, and send datagrams directly
  # to the given host.
  my $apm = ApMon::ApMon->new(["pcardaab.cern.ch:8884", "localhost"]);
  $apm->sendParameters("Cluster", "Node", {"x" => 12, "y" => 0.3});

  # given xApMon configuration will overwrite the default values.
  my $apm = ApMon::ApMon->new({
    "pcardaab.cern.ch:8884" => {"sys_monitoring" => 0, "job_monitoring" => 1, "general_info" => 1},
    "lcfg.rogrid.pub.ro passwd" => {"sys_monitoring" => 1, "general_info" => 0} });
  $apm->sendParameters("Cluster", "Node", ["name", "some_name", "value", 23]);

=head1 DESCRIPTION

ApMon is an API that can be used by any application to send monitoring
information to MonALISA services (http://monalisa.cacr.caltech.edu). The
monitoring data is sent as UDP datagrams to one or more hosts running
MonALISA. The MonALISA host may require a password enclosed in each datagram,
for authentication purposes. ApMon can also send datagrams that contain
monitoring information regarding the system or the application.

=head1 METHODS

=over

=cut

package ApMon;

use strict;
use warnings;
use ApMon::Common qw(logger);
use ApMon::ConfigLoader;
use ApMon::BgMonitor;
use IO::Handle;
use POSIX ":sys_wait_h";
use Net::Domain;
use Data::Dumper;

# Here it is kept a list of child processes that have to be killed before finishing.
my @children = ();

# Temporary files path
my $TMPDIR = (defined $ENV{'TMPDIR'}) ? $ENV{'TMPDIR'} : '/tmp';

=item $apm = new ApMon(@destLocations);

This is the constructor for the ApMon class. It can be used with several types
of arguments: a list of strings (URLs and/or files) - the configuration will be
read from all; a reference to an ARRAY - each element is a destination ML
service; for each destination the default options will be used; a reference to
a HASH - each key is a destination ML service; for each destination you can
define a set of additional options that will overwrite the default ones. You
can also leave it empty and initialize ApMon later using the
$apm->setDestinations() method. This will create the two background processes
(for bg monitoring and configuration update). If you don't want these two
processes to be created ever, you can pass the value 0 as single argument.

=cut

sub new {
  my ($type, @destLocations) = @_;
  my $this = {};
  bless $this;
  $this->{CONF_FILE} = "$TMPDIR/confApMon.$$";          # temporary name used to transfer config data from refreshConfig process to the others
  $this->{LAST_VALUES_FILE} = "$TMPDIR/valuesApMon.$$"; # temporary name used to transfer last monitored data from BgMonitor to the main process
  $this->{LAST_CONF_CHECK_TIME} = 0;                    # moment when config was checked last time in sec from Epoch
  $this->{CONF_RECHECK} = 1;                            # do check if configuration has changed
  $this->{CONF_CHECK_INTERVAL} = 20;                    # default interval to check for changes in config files
  $this->{DESTINATIONS} = {};
  my $hostname = Net::Domain::hostfqdn();
  $this->{DEFAULT_CLUSTER} = "ApMon_UserSend";
  $this->{DEFAULT_NODE} = $hostname;
  # decide if we will ever have bg processes: a single "0" argument disables them
  if( @destLocations && ref($destLocations[0]) eq "" && $destLocations[0] eq "0" ){
    $this->{ALLOW_BG_PROCESSES} = 0;
    @destLocations = ();
  }else{
    $this->{ALLOW_BG_PROCESSES} = 1;
  }
  pipe($this->{UPD_RDR}, $this->{UPD_WTR}); # open a pipe to send messages to Config Loader
  $this->{UPD_WTR}->autoflush(1);
  $this->{CONFIG_LOADER} = new ApMon::ConfigLoader($this->{UPD_RDR}, $this->{CONF_FILE});
  pipe($this->{BG_RDR}, $this->{BG_WTR}); # open a pipe to send messages to Background Monitor
  $this->{BG_WTR}->autoflush(1);
  $this->{BG_MONITOR} = new ApMon::BgMonitor($this->{BG_RDR}, $this->{CONF_FILE}, $this->{LAST_VALUES_FILE}, $this->{ALLOW_BG_PROCESSES}, $this->{CONFIG_LOADER});
  # if the configuration is given in the constructor, load it now
  $this->setDestinations(@destLocations) if @destLocations;
  $SIG{INT} = \&catch_zap;
  $SIG{TERM} = \&catch_zap;
  return $this;
}

=item $apm->setDestinations(@destLocations);

Accept the same parameters as the ApMon constructor

=cut

sub setDestinations {
  my ($this, @destLocations) = @_;
  $this->startBgProcesses();
  #logger("INFO", "\$destLocations[0]= .$destLocations[0]. ref = .".ref($destLocations[0]).".");
  if((ref($destLocations[0]) eq "ARRAY") or (ref($destLocations[0]) eq "HASH")) {
    # prevent background Config Loader to change this
    #logger("INFO", "Config is HASH or ARRAY");
    ApMon::Common::writeMessage($this->{UPD_WTR}, "conf_recheck:0\n") if @children;
  }else{
    #logger("INFO", "Config is string = .@destLocations.");
    my $msg = "conf_recheck:1\n";
    for my $dest (@destLocations) {
      $msg .= "dest:$dest\n";
    }
    # send this to background Config Loader for later updates
    ApMon::Common::writeMessage($this->{UPD_WTR}, $msg) if @children;
  }
  # perform the change now, regardless of the existence of background Config Loader
  $this->{CONFIG_LOADER}->setDestinations(@destLocations);
  $this->enableBgMonitoring(1);
}

=item $apm->addJobToMonitor($pid, $workDir, $clusterName, $nodeName);

Add another job to be monitored. A job is a tree of processes, starting from
the given PID that has files in workDir directory. If workDir is "", no disk
measurements will be performed. All produced parameters will be sent to all
interested destinations using the given cluster and node names.
=cut

sub addJobToMonitor {
  my ($this, $pid, $workDir, $clusterName, $nodeName) = @_;
  ApMon::Common::writeMessage($this->{BG_WTR}, "pid:$pid\nwork_dir:$workDir\nbg_cluster:$clusterName\nbg_node:$nodeName\n") if @children;
  # also set this to the local copy of the BG_MONITOR in case that user decides to stop background processes
  $this->{BG_MONITOR}->addJobToMonitor($pid, $workDir, $clusterName, $nodeName);
}

=item $apm->removeJobToMonitor($pid);

Remove a tree of processes, starting with PID from being monitored.

=cut

sub removeJobToMonitor {
  my ($this, $pid) = @_;
  ApMon::Common::writeMessage($this->{BG_WTR}, "rm_pid:$pid\n") if @children;
  # also set this to the local copy of the BG_MONITOR in case that user decides to stop background processes
  $this->{BG_MONITOR}->removeJobToMonitor($pid);
}

=item $apm->setMonitorClusterNode($clusterName, $nodeName);

This is used to set the cluster and node name for the system-related monitored
data.

=cut

sub setMonitorClusterNode {
  my ($this, $clusterName, $nodeName) = @_;
  ApMon::Common::writeMessage($this->{BG_WTR}, "bg_cluster:$clusterName\nbg_node:$nodeName\n") if @children;
  # also set this to the local copy of the BG_MONITOR in case that user decides to stop background processes
  $this->{BG_MONITOR}->setMonitorClusterNode($clusterName, $nodeName);
}

=item $apm->setConfRecheck($onOff [, $interval]);

Call this function in order to enable or disable the configuration recheck. If
you enable it, you may want to pass a second parameter, that specifies the
number of seconds between two configuration rechecks. Note that it makes sense
to use configuration recheck only if you get the configuration from (a set of)
files and/or URLs.

=cut

sub setConfRecheck {
  my $this = shift;
  my $onOff = shift;
  my $interval = shift || 120;
  $this->{CONF_RECHECK} = $onOff;
  $this->{CONF_CHECK_INTERVAL} = $interval;
  ApMon::Common::writeMessage($this->{UPD_WTR}, "conf_recheck:$onOff\nrecheck_interval:$interval\n") if @children;
}

=item $apm->sendParams(@params);

Use this to send a set of parameters without specifying a cluster and a node
name. In this case, the default values for cluster and node name will be used.
See the sendParameters function for more details.

=cut

sub sendParams {
  my ($this, @params) = @_;
  $this->sendTimedParams(-1, @params);
}

=item $apm->sendParameters($clusterName, $nodeName, @params);

Use this to send a set of parameters to all given destinations. The default
cluster and node names will be updated with the values given here. If
afterwards you want to send more parameters, you can use the shorter version
of this function, sendParams. The parameters to be sent can be either a list,
or a reference to a list. This list should have an even length and should
contain pairs like (paramName, paramValue). paramValue can be a string, an int
or a float.

=cut

sub sendParameters {
  my ($this, $clusterName, $nodeName, @params) = @_;
  $this->sendTimedParameters($clusterName, $nodeName, -1, @params);
}

=item $apm->sendTimedParams($time, @params);

This is the short version of the sendTimedParameters that uses the default
cluster and node name to sent the parameters and allows you to specify a time
(in seconds from Epoch) for each packet.

=cut

sub sendTimedParams {
  my ($this, $time, @params) = @_;
  $this->sendTimedParameters($this->{DEFAULT_CLUSTER}, $this->{DEFAULT_NODE}, $time, @params);
}

=item $apm->sendTimedParameters($clusterName, $nodeName, $time, @params);

Use this instead of sendParameters to set the time for each packet that is
sent. The time is in seconds from Epoch. If you use the other function, the
time for these parameters will be set by the MonALISA service that receives
them.
=cut

sub sendTimedParameters {
  my ($this, $clusterName, $nodeName, $time, @params) = @_;
  ApMon::Common::updateConfig($this);
  if((! defined($clusterName)) || (! defined($nodeName))){
    logger("WARNING", "ClusterName or NodeName are undefined. Not sending given parameters!");
    return;
  }
  # remember these names as the defaults for subsequent sendParams calls
  $this->{DEFAULT_CLUSTER} = $clusterName;
  $this->{DEFAULT_NODE} = $nodeName;
  if(scalar (keys %{$this->{DESTINATIONS}})){
    for my $dest (keys %{$this->{DESTINATIONS}}){
      ApMon::Common::directSendParameters($dest, $clusterName, $nodeName, $time, \@params);
    }
  }else{
    logger("WARNING", "No destinations defined for sending parameters");
  }
}

=item $apm->sendBgMonitoring();

Send NOW the background monitoring information to the interested destinations.
Note that this uses the current process and not the background one. So, if you
stop the background processes you can still use this call to send the
background information (both about system and jobs) whenever you want. If
$mustSend is != 0, the bgMonitoring data is sent regardless of when it was
last time sent. This allows sending a 'last result', just before the end of a
job, and which can happen anytime.

=cut

sub sendBgMonitoring {
  my $this = shift;
  my $mustSend = shift || 0;
  $this->{BG_MONITOR}->sendBgMonitoring($mustSend);
}

=item $apm->getSysMonInfo('param_name1', 'param_name2', ...);

IF and ONLY IF sendBgMonitoring() was called before, either called by user or
by the BgMonitoring process, the last system monitored values for the
requested parameters will be returned. Note that the requested parameters must
be among the monitored ones. If there is no available parameter among the
requested ones, it returns undef.

=cut

sub getSysMonInfo {
  my $this = shift;
  $this->update_last_values();
  return $this->filter_params($this->{LAST_VALUES}->{BG_MON_VALUES}, @_);
}

=item $apm->getJobMonInfo($pid, 'param_name1', 'param_name2', ...);

IF and ONLY IF sendBgMonitoring() was called before, either called by user or
by the BgMonitoring process, the last job monitored values for the given PID
will be returned. Note that the requested parameters must be among the
monitored ones. If there is no available parameter among the requested ones,
it returns undef.

=cut

sub getJobMonInfo {
  my $this = shift;
  my $pid = shift;
  $this->update_last_values();
  return $this->filter_params($this->{LAST_VALUES}->{JOBS}->{$pid}->{BG_MON_VALUES}, @_);
}

=item $apm->enableBgMonitoring($onOff);

This allows enabling and disabling of the background monitoring. Note that
this doesn't stop the background monitor process; Note also that this is
called by default by setDestinations() to enable the background monitoring
once the destination is set. It doesn't make sense to call this if you have
stopped the background processes.

=cut

sub enableBgMonitoring {
  my ($this, $onOff) = @_;
  ApMon::Common::writeMessage($this->{BG_WTR}, "bg_enable:$onOff\n") if @children;
}

=item $apm->refreshConfig();

Call this function to force refreshing the temporary config file and make sure
that at the next send, the new configuration will be used. Note that it makes
sense to use this only if you load the configuration from (a set of) files
and/or URLs. Also note that fetching the configuration files from an URL might
take some time, depending on network conditions.

=cut

sub refreshConfig {
  my $this = shift;
  $this->{LAST_CONF_CHECK_TIME} = 0;
  $this->{CONFIG_LOADER}->refreshConfig();
}

=item $apm->startBgProcesses();

This can be called in order to start the background processes (conf loader and
bg monitor). It is called by default if configuration is read from a file or
from a URL (not when you give a hash or an array for destinations).
=cut

sub startBgProcesses {
  my $this = shift;
  if(! $this->{ALLOW_BG_PROCESSES}){
    logger("DEBUG", "Not starting bg processes since they are not allowed.");
    return;
  }
  if(@children){
    logger("INFO", "Bg processes already started!");
    return;
  }
  logger("INFO", "starting bg processes");
  my $pid;
  # start the Config Loader process and retrieve the config periodically
  $pid = fork();
  if(! defined $pid){
    logger("FATAL", "cannot fork: $!");
    die;
  }
  if ($pid == 0) {
    # child
    $this->{CONFIG_LOADER}->run();
    exit(0);
  }
  # parent
  push(@children, $pid);
  undef $pid;
  # start the Background Monitoring process
  $pid = fork();
  if(! defined $pid){
    logger("FATAL", "cannot fork: $!");
    die;
  }
  if($pid == 0) {
    # child
    $this->{BG_MONITOR}->run();
    exit(0);
  }
  # parent
  push(@children, $pid);
}

=item $apm->stopBgProcesses();

This can be called to stop all child processes

=cut

sub stopBgProcesses {
  my $this = shift;
  for my $pid (@children) {
    kill 1, $pid;
    waitpid($pid, 0);
  }
  @children = ();
}

=item $apm->setLogLevel($level);

This sets the logging level for all ApMon components. $level can be one of:
"DEBUG", "NOTICE", "INFO", "WARNING", "ERROR", "FATAL". You can also set the
log level from the configuration file by specifying xApMon_loglevel = one of
the above (without quotes).

=cut

sub setLogLevel {
  my ($this, $level) = @_;
  ApMon::Common::setLogLevel($level);
  ApMon::Common::writeMessage($this->{UPD_WTR}, "loglevel:$level\n") if @children;
  $this->{CONFIG_LOADER}->setLogLevel($level);
  ApMon::Common::writeMessage($this->{BG_WTR}, "loglevel:$level\n") if @children;
  $this->{BG_MONITOR}->setLogLevel($level);
}

=item $apm->setMaxMsgRate($rate);

This sets the maxim number of messages that can be sent to a MonALISA service,
per second. By default, it is 50. This is a very large number, and the idea is
to prevent errors from the user. One can easily put in a for loop, without any
sleep, some sendParams calls that can generate a lot of unnecessary network
load.

=cut

sub setMaxMsgRate {
  my ($this, $rate) = @_;
  ApMon::Common::setMaxMsgRate($rate);
  ApMon::Common::writeMessage($this->{UPD_WTR}, "maxMsgRate:$rate\n") if @children;
  $this->{CONFIG_LOADER}->setMaxMsgRate($rate);
  ApMon::Common::writeMessage($this->{BG_WTR}, "maxMsgRate:$rate\n") if @children;
  $this->{BG_MONITOR}->setMaxMsgRate($rate);
}

=item $apm->getCpuType();

This returns a hash with the cpu type: cpu_model_name, cpu_MHz, cpu_cache (in
KB). This call is meant to be used together with setCpuSI2k, to establish a
SpecInt performance meter. If it cannot get the cpu type, it returns undef

=cut

sub getCpuType {
  my $this = shift;
  my $cpuType = ApMon::Common::getCpuType();
  ApMon::Common::writeMessage($this->{BG_WTR}, "cpu_mhz:$ApMon::Common::CpuMHz\n") if(@children && $ApMon::Common::CpuMHz);
  return $cpuType;
}

=item $apm->setCpuSI2k(si2k);

This sets the SpecINT2000 meter for the current machine. Consequently, jobs
will also report cpu_ksi2k, based on this value and cpu_time.

=cut

sub setCpuSI2k {
  my ($this, $si2k) = @_;
  ApMon::Common::setCpuSI2k($si2k);
  ApMon::Common::writeMessage($this->{BG_WTR}, "cpu_si2k:$si2k\n") if @children;
}

=item $apm->free();

This function stops the background processes and removes the temporary file.
After this call, the ApMon object must be recreated in order to be used. It is
provided for exceptional cases when you have to recreate over and over again
the ApMon object; you have to free it when you don't need anymore.

=cut

sub free {
  my $this = shift;
  $this->stopBgProcesses();
  #close(ApMon::Common::SOCKET);
  unlink("$TMPDIR/confApMon.$$");
  unlink("$TMPDIR/valuesApMon.$$");
}

##################################################################################################
# The following is internal stuff.

# This is called if user presses CTRL+C or kill is sent to me
# FIX: the log string had been split across two physical lines by the
# extraction; rejoined into a single-line message.
sub catch_zap {
  logger("DEBUG", "Killed! Removing temp files $TMPDIR/{conf,values}ApMon.$$") if defined &logger;
  unlink("$TMPDIR/confApMon.$$");
  unlink("$TMPDIR/valuesApMon.$$");
  stopBgProcesses("dummy");
  exit(0);
}

# from the given hash, based on the given list of parameters, build a hash with all available;
# if the resulting list is empty, return undef.
sub filter_params {
  my $this = shift;
  my $h_src = shift || {};
  my $h_res = {};
  for my $key (@_){
    $h_res->{$key} = $h_src->{$key} if defined($h_src->{$key});
  }
  return (scalar(keys(%$h_res)) == 0 ? undef : $h_res);
}

# Update the last bg monitoring values hash with the contents of the LAST_VALUES_FILE.
# Note that this is produced only after sendBgMonitoring was run, either from the main
# process or the BgMonitor process.
# FIX: restored the <F> readline operator lost in extraction (matches the
# open(F, ...)/close F pair below).
sub update_last_values {
  my $this = shift;
  my $now = time;
  # re-read at most once every 2 seconds
  return if $this->{LAST_VALUES_TIME} && ($now - $this->{LAST_VALUES_TIME} < 2);
  if(open(F, "<$this->{LAST_VALUES_FILE}")){
    my @lines = <F>;
    close F;
    my $VAR1;
    # the file contains a Data::Dumper image of the values hash
    $this->{LAST_VALUES} = eval join("", @lines);
    logger("ERROR", "Error restoring the last bg monitoring values from file $this->{LAST_VALUES_FILE}:\n$@") if $@;
    $this->{LAST_VALUES_TIME} = $now;
  }else{
    logger("WARNING", "Cannot read the last bg monitoring values from $this->{LAST_VALUES_FILE}");
  }
}

END {
  unlink("$TMPDIR/confApMon.$$");
  unlink("$TMPDIR/valuesApMon.$$");
  stopBgProcesses("dummy");
}

1;

__END__

=back

=head1 AUTHOR

Catalin Cirstoiu

=head1 COPYRIGHT AND LICENSE

This module is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND,
either expressed or implied. This library is free software; you can
redistribute or modify it under the same terms as Perl itself.

=cut

================================================
FILE: ApMon/perl/ApMon/sendToML.sh
================================================
#!/bin/bash
# Sample shell script that sends the given parameters to the ML service
# running on the same machine using ApMon.
# NOTE(review): the next physical line is a collapsed dump of the tail of
# sendToML.sh (the content resembles the sibling servMon.sh launcher). The
# heredoc body after `cat <` and the argument/pid-file parsing that should
# precede it were lost in extraction ("cat </dev/null`" is truncated) -
# reconstruct from the upstream ApMon distribution before using this script.
# # 2007-04-03 # Catalin.Cirstoiu@cern.ch if [ $# -lt 2 ] ; then cat </dev/null` lines=`ps -p $pid 2>/dev/null | wc -l` if [ "$lines" -eq 2 ] ; then # there is a previous ApMon instance running if [ -n "$force" ] ; then echo "Killing previous ApMon instance with pid $pid ..." kill -s 1 $pid 2>/dev/null ; sleep 1 lines=`ps -p $pid 2>/dev/null | wc -l` if [ "$lines" -eq 2 ] ; then echo "Failed killing ApMon instance with pid $pid! Trying with -9..." kill -s 9 $pid 2>/dev/null ; sleep 1 lines=`ps -p $pid 2>/dev/null | wc -l` if [ "$lines" -eq 2 ] ; then echo "Failed killing -9 ApMon instance with pid $pid!!! Aborting." exit -1 fi fi else # force flag is not set; just exit exit 1 fi fi fi if [ -n "$justKill" ] ; then exit 0; fi #Set the destination for the monitoring information #destination="\"http://monalisa2.cern.ch/~catac/apmon/destinations.conf\"" #destination="['pcardaab.cern.ch:8884']" #destination="{'pcardaab.cern.ch' => {loglevel => 'NOTICE'}}" MONALISA_HOST=${MONALISA_HOST:-"localhost"} APMON_DEBUG_LEVEL=${APMON_DEBUG_LEVEL:-"WARNING"} destination=${APMON_CONFIG:-"['$MONALISA_HOST']"} #Finally, run the perl interpreter with a small program that sends all these parameters exe="use strict; use warnings; use ApMon; my \$apm = new ApMon(0); \$apm->setLogLevel('$APMON_DEBUG_LEVEL'); \$apm->setDestinations($destination); \$apm->setMonitorClusterNode('${hostGroup}_Nodes', '$host');$srvMonCmds while(1){ \$apm->sendBgMonitoring(); sleep(120); } " #echo "Exe = [$exe]" export PERL5LIB=`dirname $0` if [ -n "$pidfile" ] ; then # pid file given; run in background logfile="`dirname $pidfile`/`basename $pidfile .pid`.log" echo -e "`date` Starting ApMon in background mode...\nlogfile in: $logfile\npidfile in: $pidfile" | tee $logfile perl -e "$exe" > $logfile 2>&1 & pid=$! echo $pid > $pidfile else # pid file not given; run in interactive mode echo -e "`date` Starting ApMon in interactive mode..." 
# NOTE(review): the next physical line contains, collapsed together: the end
# of sendToML.sh, the whole of ApMon/run.sh (eosapmond launcher), the whole
# eosapmond.service systemd unit, and the start of the top-level
# CMakeLists.txt license banner. Kept byte-identical; the original line
# structure must be restored from the repository before these files can run.
exec perl -e "$exe" fi ================================================ FILE: ApMon/run.sh ================================================ #!/bin/sh [ -f /etc/sysconfig/eos ] && . /etc/sysconfig/eos [ -f /etc/sysconfig/eos_env ] && . /etc/sysconfig/eos_env cleanup() { # kill all subprocesses for pid in $(ps --ppid $$ --forest -o pid --no-headers); do kill $pid &> /dev/null done exit 0 } trap cleanup SIGINT SIGTERM if [ -z "${MONALISAHOST}" ]; then echo "error: please configure the MONALISAHOST variable in /etc/sysconfig/eos first!" exit 1 fi eosuser=daemon xrdpid=$(pgrep -u "${eosuser}" xrootd | head -1) if [ -z "${xrdpid}" ]; then xrdpid=999999 fi export PERL5LIB=$(perl -V:installsitearch | cut -d "'" -f 2)/ApMon runuser -u ${eosuser} -- /opt/eos/apmon/eosapmond ${MONALISAHOST} /var/log/eos/apmon/apmon.log ${APMON_DEBUG_LEVEL:-"WARNING"} ${APMON_INSTANCE_NAME:-"unconfigured"} ${HOSTNAME} ${xrdpid} & wait ================================================ FILE: ApMon/usr/lib/systemd/system/eosapmond.service ================================================ # systemd service unit file for eosapmond # Author: Gianmaria Del Monte # Description: Systemd service to start apmon for monitoring xrootd on EOS [Unit] Description=Starts apmon to monitor xrootd on EOS After=network.target StartLimitBurst=3 StartLimitIntervalSec=5 [Service] ExecStart=/opt/eos/apmon/run.sh User=root Restart=always [Install] WantedBy=multi-user.target ================================================ FILE: CMakeLists.txt ================================================ # ---------------------------------------------------------------------- # File: CMakeLists.txt # Author: Andreas-Joachim Peters - CERN # ---------------------------------------------------------------------- # ************************************************************************ # * EOS - the CERN Disk Storage System * # * Copyright (C) 2011 CERN/Switzerland * # * * # * This program is free software: you can redistribute it and/or 
modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation, either version 3 of the License, or *
# * (at your option) any later version. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program. If not, see .*
# ************************************************************************

cmake_minimum_required (VERSION 3.16...3.30 FATAL_ERROR)

# Set default build type if not set. This must be done before calling project()
if(NOT CMAKE_BUILD_TYPE AND NOT GENERATOR_IS_MULTI_CONFIG)
  if(NOT CMAKE_C_FLAGS AND NOT CMAKE_CXX_FLAGS)
    set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING
      "Build type: Debug Release RelWithDebInfo MinSizeRel None(use CMAKE_CXX_FLAGS)")
  endif()
endif()

project(eos DESCRIPTION "EOS Open Storage" LANGUAGES C CXX ASM)

# Insert cmake/ before everything else in the CMake module path
list(INSERT CMAKE_MODULE_PATH 0 "${PROJECT_SOURCE_DIR}/cmake")

option(CCACHE "Use ccache for compilation" ON)

if(CCACHE)
  find_program(CCACHE_COMMAND ccache ccache-swig)
  # FIX: mark_as_advanced takes cache variable *names*; the previous extra
  # argument ${CCACHE_COMMAND} expanded to the program path, which is not a
  # cache variable, so it was meaningless.
  mark_as_advanced(CCACHE_COMMAND)
  if(EXISTS ${CCACHE_COMMAND})
    message(VERBOSE "Found ccache: ${CCACHE_COMMAND}")
    set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_COMMAND})
  else()
    message(VERBOSE "Could NOT find ccache")
    set(CCACHE OFF CACHE BOOL "Use ccache for compilation (disabled)" FORCE)
  endif()
endif()

#-------------------------------------------------------------------------------
# Activate include-what-you-use
#-------------------------------------------------------------------------------
option(ENABLE_IWYU "Enable include-what-you-use tool" OFF)

if(ENABLE_IWYU)
  find_program(IWYU_PATH NAMES include-what-you-use
iwyu)

  if(NOT IWYU_PATH)
    message(FATAL_ERROR "Could not find include-what-you-use")
  endif()

  # Run every C and C++ compilation through the IWYU tool
  set(CMAKE_CXX_INCLUDE_WHAT_YOU_USE ${IWYU_PATH})
  set(CMAKE_C_INCLUDE_WHAT_YOU_USE ${IWYU_PATH})
endif()

#-------------------------------------------------------------------------------
# Include code coverage module
#-------------------------------------------------------------------------------
option(COVERAGE "Build with test coverage reporting" OFF)

if (COVERAGE)
  include(EosCoverage)
endif()

#-------------------------------------------------------------------------------
# Include generic functions and compiler definition parameters
#-------------------------------------------------------------------------------
# Default the install prefix to /usr (RPM layout) unless the user chose one.
if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
  set(CMAKE_INSTALL_PREFIX "/usr" CACHE PATH "Default install prefix: /usr" FORCE)
endif ()

include(EosUtils)
EOS_CheckOutOfSourceBuild()
# Resolve uid/gid of the "daemon" service account into DAEMONUID/DAEMONGID
EOS_GetUidGid("daemon" "DAEMONUID" "DAEMONGID")
EOS_GetVersion("${VERSION_MAJOR}" "${VERSION_MINOR}" "${VERSION_PATCH}" "${RELEASE}")
include(EosOSDefaults)

# PACKAGEONLY builds only use the packaging targets, so skip compiler flags
if (NOT PACKAGEONLY)
  include(EosCompileFlags)
endif()

set(CMAKE_INSTALL_SYSCONFDIR /etc)
include(EosFindLibs)
include(CTest)

#-------------------------------------------------------------------------------
# Make gtest / gmock available for all downstream CMakeLists.txt that need it
#-------------------------------------------------------------------------------
option(USE_SYSTEM_GTEST "Use GoogleTest installed in the system if found" OFF)

if(USE_SYSTEM_GTEST)
  find_package(GTest REQUIRED)
else()
  add_subdirectory(unit_tests/googletest EXCLUDE_FROM_ALL)
  # Add alias libraries to emulate same behavior as external GoogleTest
  add_library(GTest::GTest ALIAS gtest)
  add_library(GTest::Main ALIAS gtest_main)
endif()

#-------------------------------------------------------------------------------
# Generate documentation
#-------------------------------------------------------------------------------
if (Python3_Interpreter_FOUND AND
SPHINX_FOUND)
  add_custom_target(doc
    COMMAND python3 generate_docs.py
    WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}/doc"
    COMMENT "Build HTML documentation with Sphinx ...")
endif ()

#-------------------------------------------------------------------------------
# Generate man pages
#-------------------------------------------------------------------------------
if (BUILD_MANPAGES AND HELP2MAN_FOUND)
  add_subdirectory(man)
endif()

#-------------------------------------------------------------------------------
# Build qclient static library
#-------------------------------------------------------------------------------
# NOTE(review): the banner above looks stale - this section wires up all
# component subdirectories, not only qclient.
include_directories(${CMAKE_SOURCE_DIR})
add_subdirectory(common)
add_subdirectory(proto)
add_subdirectory(fst)
add_subdirectory(console)
add_subdirectory(fusex)
add_subdirectory(misc)
add_subdirectory(test)
add_subdirectory(namespace/ns_quarkdb/qclient)

# Server-side components are skipped for client-only builds
if (NOT CLIENT)
  add_subdirectory(client)
  add_subdirectory(mgm)
  add_subdirectory(namespace)
  add_subdirectory(utils)
  add_subdirectory(archive)
  add_subdirectory(auth_plugin)
  add_subdirectory(unit_tests)
  add_subdirectory(quarkdb)
endif ()

#-------------------------------------------------------------------------------
# Uninstall target
#-------------------------------------------------------------------------------
configure_file(
  "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
  "${CMAKE_CURRENT_BINARY_DIR}/cmake/cmake_uninstall.cmake"
  IMMEDIATE @ONLY)

add_custom_target(
  uninstall
  "${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake/cmake_uninstall.cmake")

#-------------------------------------------------------------------------------
# Packaging
#-------------------------------------------------------------------------------
set(CPACK_SOURCE_GENERATOR "TGZ")
set(CPACK_PACKAGE_NAME "${CMAKE_PROJECT_NAME}")
set(CPACK_PACKAGE_VERSION "${VERSION}")
set(CPACK_PACKAGE_VERSION_MAJOR "${VERSION_MAJOR}")
set(CPACK_PACKAGE_VERSION_MINOR "${VERSION_MINOR}")
set(CPACK_PACKAGE_VERSION_PATCH
"${VERSION_PATCH}")
set(CPACK_PACKAGE_RELEASE "${RELEASE}")
set(CPACK_SOURCE_PACKAGE_FILE_NAME "${CMAKE_PROJECT_NAME}-${CPACK_PACKAGE_VERSION}-${CPACK_PACKAGE_RELEASE}")
# Paths/patterns excluded from the source tarball produced by package_source
set(CPACK_SOURCE_IGNORE_FILES "${CMAKE_CURRENT_BINARY_DIR};/ApMon/;/git/;/gitlab-ci/;/ccache/;/xrootd-dsi/;/nginx/;/dsi/;\
;/grpc/eos-grpc.spec;/.deps/;~$;'.'o$;/lib/;/.git/;eos.spec.in;elrepopackage.spec;.tar.gz$;\
.tar.bz2$;${CPACK_SOURCE_IGNORE_FILES};")
set(EOS_TUI_VERSION "0.2.3")

# Generate the script that expands eos.spec.in into eos.spec
configure_file(
  "${CMAKE_CURRENT_SOURCE_DIR}/cmake/config_spec.cmake.in"
  "${CMAKE_CURRENT_BINARY_DIR}/cmake/config_spec.cmake"
  @ONLY IMMEDIATE)

add_custom_command(
  OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/eos.spec"
  COMMAND ${CMAKE_COMMAND} -P "${CMAKE_CURRENT_BINARY_DIR}/cmake/config_spec.cmake"
  DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/cmake/config_spec.cmake"
  "${CMAKE_CURRENT_SOURCE_DIR}/eos.spec.in")

# "dist" builds the source tarball (requires the generated spec file)
add_custom_target(
  dist
  COMMAND ${CMAKE_MAKE_PROGRAM} package_source
  DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/eos.spec")

add_custom_command(
  TARGET dist POST_BUILD
  COMMAND rm ARGS -rf "${CMAKE_CURRENT_SOURCE_DIR}/eos.spec"
  COMMENT "Clean generated spec file")

include(CPack)

#-------------------------------------------------------------------------------
# Source and binary rpms
#-------------------------------------------------------------------------------
set(EOS_ARCHIVE "${CMAKE_PROJECT_NAME}-${CPACK_PACKAGE_VERSION}-${CPACK_PACKAGE_RELEASE}.tar.gz")
# md5 digests keep the SRPM installable on older EL distributions
set(SRPM_DEFINE --define "_source_filedigest_algorithm md5" --define "_binary_filedigest_algorithm md5")

# Collect rpmbuild --with flags mirroring the configured build options
if (CMAKE_CXX_COMPILER_ID MATCHES "Clang" OR CLANG_BUILD)
  LIST(APPEND RPM_OPTIONS --with clang)
endif()

if (NOT CLIENT)
  LIST(APPEND RPM_OPTIONS --with server)
  if (EOS_XROOTD)
    LIST(APPEND RPM_OPTIONS --with eos_xrootd_rh)
  endif()
endif()

if (ASAN)
  LIST(APPEND RPM_OPTIONS --with asan)
endif()

if (TSAN)
  LIST(APPEND RPM_OPTIONS --with tsan)
endif()

option(NO_SSE "Build without sse instruction set" OFF)

if (NO_SSE)
  LIST(APPEND RPM_OPTIONS --with no_sse)
endif()

option(EOS_GRPC_GW
"Build with eos grpc gateway support" OFF)
# FIX: the option enables the grpc gateway (--with eos_grpc_gateway), so the
# previous description "Build without eos grpc support" contradicted its effect.

if (EOS_GRPC_GW)
  LIST(APPEND RPM_OPTIONS --with eos_grpc_gateway)
endif()

# "srpm"/"rpm" run rpmbuild on the dist tarball with the collected options
add_custom_target(
  srpm
  COMMAND rpmbuild -ts ${EOS_ARCHIVE} --define "_topdir ${CMAKE_BINARY_DIR}" ${SRPM_DEFINE} ${RPM_OPTIONS})

add_custom_target(
  rpm
  COMMAND rpmbuild -tb ${EOS_ARCHIVE} --define "_topdir ${CMAKE_BINARY_DIR}" ${RPM_OPTIONS})

add_dependencies(srpm dist)
add_dependencies(rpm dist)
include(EosTui)

#-------------------------------------------------------------------------------
# Custom target to build on OSX
#-------------------------------------------------------------------------------
add_custom_target(
  osx
  COMMAND sudo ../utils/eos-osx-package.sh ${CPACK_PACKAGE_VERSION})

#-------------------------------------------------------------------------------
# Custom target to build graphviz for all target
#-------------------------------------------------------------------------------
include(EosGraphviz)

#-------------------------------------------------------------------------------
# Print project summary
#-------------------------------------------------------------------------------
include(EosSummary)

================================================
FILE: CTestConfig.cmake
================================================
set(CTEST_PROJECT_NAME "EOS")
set(CTEST_NIGHTLY_START_TIME "00:00:00 UTC")
set(CTEST_DROP_SITE_CDASH TRUE)
set(CTEST_SUBMIT_URL https://my.cdash.org/submit.php?project=EOS)

================================================
FILE: License
================================================
// GNU GENERAL PUBLIC LICENSE
// Version 3, 29 June 2007
//
// Copyright (C) 2007 Free Software Foundation, Inc.
// Everyone is permitted to copy and distribute verbatim copies
// of this license document, but changing it is not allowed.
//
// Preamble
//
// The GNU General Public License is a free, copyleft license for
// software and other kinds of works.
// // The licenses for most software and other practical works are designed // to take away your freedom to share and change the works. By contrast, // the GNU General Public License is intended to guarantee your freedom to // share and change all versions of a program--to make sure it remains free // software for all its users. We, the Free Software Foundation, use the // GNU General Public License for most of our software; it applies also to // any other work released this way by its authors. You can apply it to // your programs, too. // // When we speak of free software, we are referring to freedom, not // price. Our General Public Licenses are designed to make sure that you // have the freedom to distribute copies of free software (and charge for // them if you wish), that you receive source code or can get it if you // want it, that you can change the software or use pieces of it in new // free programs, and that you know you can do these things. // // To protect your rights, we need to prevent others from denying you // these rights or asking you to surrender the rights. Therefore, you have // certain responsibilities if you distribute copies of the software, or if // you modify it: responsibilities to respect the freedom of others. // // For example, if you distribute copies of such a program, whether // gratis or for a fee, you must pass on to the recipients the same // freedoms that you received. You must make sure that they, too, receive // or can get the source code. And you must show them these terms so they // know their rights. // // Developers that use the GNU GPL protect your rights with two steps: // (1) assert copyright on the software, and (2) offer you this License // giving you legal permission to copy, distribute and/or modify it. // // For the developers' and authors' protection, the GPL clearly explains // that there is no warranty for this free software. 
For both users' and // authors' sake, the GPL requires that modified versions be marked as // changed, so that their problems will not be attributed erroneously to // authors of previous versions. // // Some devices are designed to deny users access to install or run // modified versions of the software inside them, although the manufacturer // can do so. This is fundamentally incompatible with the aim of // protecting users' freedom to change the software. The systematic // pattern of such abuse occurs in the area of products for individuals to // use, which is precisely where it is most unacceptable. Therefore, we // have designed this version of the GPL to prohibit the practice for those // products. If such problems arise substantially in other domains, we // stand ready to extend this provision to those domains in future versions // of the GPL, as needed to protect the freedom of users. // // Finally, every program is threatened constantly by software patents. // States should not allow patents to restrict development and use of // software on general-purpose computers, but in those that do, we wish to // avoid the special danger that patents applied to a free program could // make it effectively proprietary. To prevent this, the GPL assures that // patents cannot be used to render the program non-free. // // The precise terms and conditions for copying, distribution and // modification follow. // // TERMS AND CONDITIONS // // 0. Definitions. // // "This License" refers to version 3 of the GNU General Public License. // // "Copyright" also means copyright-like laws that apply to other kinds of // works, such as semiconductor masks. // // "The Program" refers to any copyrightable work licensed under this // License. Each licensee is addressed as "you". "Licensees" and // "recipients" may be individuals or organizations. 
// // To "modify" a work means to copy from or adapt all or part of the work // in a fashion requiring copyright permission, other than the making of an // exact copy. The resulting work is called a "modified version" of the // earlier work or a work "based on" the earlier work. // // A "covered work" means either the unmodified Program or a work based // on the Program. // // To "propagate" a work means to do anything with it that, without // permission, would make you directly or secondarily liable for // infringement under applicable copyright law, except executing it on a // computer or modifying a private copy. Propagation includes copying, // distribution (with or without modification), making available to the // public, and in some countries other activities as well. // // To "convey" a work means any kind of propagation that enables other // parties to make or receive copies. Mere interaction with a user through // a computer network, with no transfer of a copy, is not conveying. // // An interactive user interface displays "Appropriate Legal Notices" // to the extent that it includes a convenient and prominently visible // feature that (1) displays an appropriate copyright notice, and (2) // tells the user that there is no warranty for the work (except to the // extent that warranties are provided), that licensees may convey the // work under this License, and how to view a copy of this License. If // the interface presents a list of user commands or options, such as a // menu, a prominent item in the list meets this criterion. // // 1. Source Code. // // The "source code" for a work means the preferred form of the work // for making modifications to it. "Object code" means any non-source // form of a work. 
// // A "Standard Interface" means an interface that either is an official // standard defined by a recognized standards body, or, in the case of // interfaces specified for a particular programming language, one that // is widely used among developers working in that language. // // The "System Libraries" of an executable work include anything, other // than the work as a whole, that (a) is included in the normal form of // packaging a Major Component, but which is not part of that Major // Component, and (b) serves only to enable use of the work with that // Major Component, or to implement a Standard Interface for which an // implementation is available to the public in source code form. A // "Major Component", in this context, means a major essential component // (kernel, window system, and so on) of the specific operating system // (if any) on which the executable work runs, or a compiler used to // produce the work, or an object code interpreter used to run it. // // The "Corresponding Source" for a work in object code form means all // the source code needed to generate, install, and (for an executable // work) run the object code and to modify the work, including scripts to // control those activities. However, it does not include the work's // System Libraries, or general-purpose tools or generally available free // programs which are used unmodified in performing those activities but // which are not part of the work. For example, Corresponding Source // includes interface definition files associated with source files for // the work, and the source code for shared libraries and dynamically // linked subprograms that the work is specifically designed to require, // such as by intimate data communication or control flow between those // subprograms and other parts of the work. // // The Corresponding Source need not include anything that users // can regenerate automatically from other parts of the Corresponding // Source. 
// // The Corresponding Source for a work in source code form is that // same work. // // 2. Basic Permissions. // // All rights granted under this License are granted for the term of // copyright on the Program, and are irrevocable provided the stated // conditions are met. This License explicitly affirms your unlimited // permission to run the unmodified Program. The output from running a // covered work is covered by this License only if the output, given its // content, constitutes a covered work. This License acknowledges your // rights of fair use or other equivalent, as provided by copyright law. // // You may make, run and propagate covered works that you do not // convey, without conditions so long as your license otherwise remains // in force. You may convey covered works to others for the sole purpose // of having them make modifications exclusively for you, or provide you // with facilities for running those works, provided that you comply with // the terms of this License in conveying all material for which you do // not control copyright. Those thus making or running the covered works // for you must do so exclusively on your behalf, under your direction // and control, on terms that prohibit them from making any copies of // your copyrighted material outside their relationship with you. // // Conveying under any other circumstances is permitted solely under // the conditions stated below. Sublicensing is not allowed; section 10 // makes it unnecessary. // // 3. Protecting Users' Legal Rights From Anti-Circumvention Law. // // No covered work shall be deemed part of an effective technological // measure under any applicable law fulfilling obligations under article // 11 of the WIPO copyright treaty adopted on 20 December 1996, or // similar laws prohibiting or restricting circumvention of such // measures. 
// // When you convey a covered work, you waive any legal power to forbid // circumvention of technological measures to the extent such circumvention // is effected by exercising rights under this License with respect to // the covered work, and you disclaim any intention to limit operation or // modification of the work as a means of enforcing, against the work's // users, your or third parties' legal rights to forbid circumvention of // technological measures. // // 4. Conveying Verbatim Copies. // // You may convey verbatim copies of the Program's source code as you // receive it, in any medium, provided that you conspicuously and // appropriately publish on each copy an appropriate copyright notice; // keep intact all notices stating that this License and any // non-permissive terms added in accord with section 7 apply to the code; // keep intact all notices of the absence of any warranty; and give all // recipients a copy of this License along with the Program. // // You may charge any price or no price for each copy that you convey, // and you may offer support or warranty protection for a fee. // // 5. Conveying Modified Source Versions. // // You may convey a work based on the Program, or the modifications to // produce it from the Program, in the form of source code under the // terms of section 4, provided that you also meet all of these conditions: // // a) The work must carry prominent notices stating that you modified // it, and giving a relevant date. // // b) The work must carry prominent notices stating that it is // released under this License and any conditions added under section // 7. This requirement modifies the requirement in section 4 to // "keep intact all notices". // // c) You must license the entire work, as a whole, under this // License to anyone who comes into possession of a copy. 
This // License will therefore apply, along with any applicable section 7 // additional terms, to the whole of the work, and all its parts, // regardless of how they are packaged. This License gives no // permission to license the work in any other way, but it does not // invalidate such permission if you have separately received it. // // d) If the work has interactive user interfaces, each must display // Appropriate Legal Notices; however, if the Program has interactive // interfaces that do not display Appropriate Legal Notices, your // work need not make them do so. // // A compilation of a covered work with other separate and independent // works, which are not by their nature extensions of the covered work, // and which are not combined with it such as to form a larger program, // in or on a volume of a storage or distribution medium, is called an // "aggregate" if the compilation and its resulting copyright are not // used to limit the access or legal rights of the compilation's users // beyond what the individual works permit. Inclusion of a covered work // in an aggregate does not cause this License to apply to the other // parts of the aggregate. // // 6. Conveying Non-Source Forms. // // You may convey a covered work in object code form under the terms // of sections 4 and 5, provided that you also convey the // machine-readable Corresponding Source under the terms of this License, // in one of these ways: // // a) Convey the object code in, or embodied in, a physical product // (including a physical distribution medium), accompanied by the // Corresponding Source fixed on a durable physical medium // customarily used for software interchange. 
// // b) Convey the object code in, or embodied in, a physical product // (including a physical distribution medium), accompanied by a // written offer, valid for at least three years and valid for as // long as you offer spare parts or customer support for that product // model, to give anyone who possesses the object code either (1) a // copy of the Corresponding Source for all the software in the // product that is covered by this License, on a durable physical // medium customarily used for software interchange, for a price no // more than your reasonable cost of physically performing this // conveying of source, or (2) access to copy the // Corresponding Source from a network server at no charge. // // c) Convey individual copies of the object code with a copy of the // written offer to provide the Corresponding Source. This // alternative is allowed only occasionally and noncommercially, and // only if you received the object code with such an offer, in accord // with subsection 6b. // // d) Convey the object code by offering access from a designated // place (gratis or for a charge), and offer equivalent access to the // Corresponding Source in the same way through the same place at no // further charge. You need not require recipients to copy the // Corresponding Source along with the object code. If the place to // copy the object code is a network server, the Corresponding Source // may be on a different server (operated by you or a third party) // that supports equivalent copying facilities, provided you maintain // clear directions next to the object code saying where to find the // Corresponding Source. Regardless of what server hosts the // Corresponding Source, you remain obligated to ensure that it is // available for as long as needed to satisfy these requirements. 
// // e) Convey the object code using peer-to-peer transmission, provided // you inform other peers where the object code and Corresponding // Source of the work are being offered to the general public at no // charge under subsection 6d. // // A separable portion of the object code, whose source code is excluded // from the Corresponding Source as a System Library, need not be // included in conveying the object code work. // // A "User Product" is either (1) a "consumer product", which means any // tangible personal property which is normally used for personal, family, // or household purposes, or (2) anything designed or sold for incorporation // into a dwelling. In determining whether a product is a consumer product, // doubtful cases shall be resolved in favor of coverage. For a particular // product received by a particular user, "normally used" refers to a // typical or common use of that class of product, regardless of the status // of the particular user or of the way in which the particular user // actually uses, or expects or is expected to use, the product. A product // is a consumer product regardless of whether the product has substantial // commercial, industrial or non-consumer uses, unless such uses represent // the only significant mode of use of the product. // // "Installation Information" for a User Product means any methods, // procedures, authorization keys, or other information required to install // and execute modified versions of a covered work in that User Product from // a modified version of its Corresponding Source. The information must // suffice to ensure that the continued functioning of the modified object // code is in no case prevented or interfered with solely because // modification has been made. 
// // If you convey an object code work under this section in, or with, or // specifically for use in, a User Product, and the conveying occurs as // part of a transaction in which the right of possession and use of the // User Product is transferred to the recipient in perpetuity or for a // fixed term (regardless of how the transaction is characterized), the // Corresponding Source conveyed under this section must be accompanied // by the Installation Information. But this requirement does not apply // if neither you nor any third party retains the ability to install // modified object code on the User Product (for example, the work has // been installed in ROM). // // The requirement to provide Installation Information does not include a // requirement to continue to provide support service, warranty, or updates // for a work that has been modified or installed by the recipient, or for // the User Product in which it has been modified or installed. Access to a // network may be denied when the modification itself materially and // adversely affects the operation of the network or violates the rules and // protocols for communication across the network. // // Corresponding Source conveyed, and Installation Information provided, // in accord with this section must be in a format that is publicly // documented (and with an implementation available to the public in // source code form), and must require no special password or key for // unpacking, reading or copying. // // 7. Additional Terms. // // "Additional permissions" are terms that supplement the terms of this // License by making exceptions from one or more of its conditions. // Additional permissions that are applicable to the entire Program shall // be treated as though they were included in this License, to the extent // that they are valid under applicable law. 
If additional permissions // apply only to part of the Program, that part may be used separately // under those permissions, but the entire Program remains governed by // this License without regard to the additional permissions. // // When you convey a copy of a covered work, you may at your option // remove any additional permissions from that copy, or from any part of // it. (Additional permissions may be written to require their own // removal in certain cases when you modify the work.) You may place // additional permissions on material, added by you to a covered work, // for which you have or can give appropriate copyright permission. // // Notwithstanding any other provision of this License, for material you // add to a covered work, you may (if authorized by the copyright holders of // that material) supplement the terms of this License with terms: // // a) Disclaiming warranty or limiting liability differently from the // terms of sections 15 and 16 of this License; or // // b) Requiring preservation of specified reasonable legal notices or // author attributions in that material or in the Appropriate Legal // Notices displayed by works containing it; or // // c) Prohibiting misrepresentation of the origin of that material, or // requiring that modified versions of such material be marked in // reasonable ways as different from the original version; or // // d) Limiting the use for publicity purposes of names of licensors or // authors of the material; or // // e) Declining to grant rights under trademark law for use of some // trade names, trademarks, or service marks; or // // f) Requiring indemnification of licensors and authors of that // material by anyone who conveys the material (or modified versions of // it) with contractual assumptions of liability to the recipient, for // any liability that these contractual assumptions directly impose on // those licensors and authors. 
// // All other non-permissive additional terms are considered "further // restrictions" within the meaning of section 10. If the Program as you // received it, or any part of it, contains a notice stating that it is // governed by this License along with a term that is a further // restriction, you may remove that term. If a license document contains // a further restriction but permits relicensing or conveying under this // License, you may add to a covered work material governed by the terms // of that license document, provided that the further restriction does // not survive such relicensing or conveying. // // If you add terms to a covered work in accord with this section, you // must place, in the relevant source files, a statement of the // additional terms that apply to those files, or a notice indicating // where to find the applicable terms. // // Additional terms, permissive or non-permissive, may be stated in the // form of a separately written license, or stated as exceptions; // the above requirements apply either way. // // 8. Termination. // // You may not propagate or modify a covered work except as expressly // provided under this License. Any attempt otherwise to propagate or // modify it is void, and will automatically terminate your rights under // this License (including any patent licenses granted under the third // paragraph of section 11). // // However, if you cease all violation of this License, then your // license from a particular copyright holder is reinstated (a) // provisionally, unless and until the copyright holder explicitly and // finally terminates your license, and (b) permanently, if the copyright // holder fails to notify you of the violation by some reasonable means // prior to 60 days after the cessation. 
// // Moreover, your license from a particular copyright holder is // reinstated permanently if the copyright holder notifies you of the // violation by some reasonable means, this is the first time you have // received notice of violation of this License (for any work) from that // copyright holder, and you cure the violation prior to 30 days after // your receipt of the notice. // // Termination of your rights under this section does not terminate the // licenses of parties who have received copies or rights from you under // this License. If your rights have been terminated and not permanently // reinstated, you do not qualify to receive new licenses for the same // material under section 10. // // 9. Acceptance Not Required for Having Copies. // // You are not required to accept this License in order to receive or // run a copy of the Program. Ancillary propagation of a covered work // occurring solely as a consequence of using peer-to-peer transmission // to receive a copy likewise does not require acceptance. However, // nothing other than this License grants you permission to propagate or // modify any covered work. These actions infringe copyright if you do // not accept this License. Therefore, by modifying or propagating a // covered work, you indicate your acceptance of this License to do so. // // 10. Automatic Licensing of Downstream Recipients. // // Each time you convey a covered work, the recipient automatically // receives a license from the original licensors, to run, modify and // propagate that work, subject to this License. You are not responsible // for enforcing compliance by third parties with this License. // // An "entity transaction" is a transaction transferring control of an // organization, or substantially all assets of one, or subdividing an // organization, or merging organizations. 
If propagation of a covered // work results from an entity transaction, each party to that // transaction who receives a copy of the work also receives whatever // licenses to the work the party's predecessor in interest had or could // give under the previous paragraph, plus a right to possession of the // Corresponding Source of the work from the predecessor in interest, if // the predecessor has it or can get it with reasonable efforts. // // You may not impose any further restrictions on the exercise of the // rights granted or affirmed under this License. For example, you may // not impose a license fee, royalty, or other charge for exercise of // rights granted under this License, and you may not initiate litigation // (including a cross-claim or counterclaim in a lawsuit) alleging that // any patent claim is infringed by making, using, selling, offering for // sale, or importing the Program or any portion of it. // // 11. Patents. // // A "contributor" is a copyright holder who authorizes use under this // License of the Program or a work on which the Program is based. The // work thus licensed is called the contributor's "contributor version". // // A contributor's "essential patent claims" are all patent claims // owned or controlled by the contributor, whether already acquired or // hereafter acquired, that would be infringed by some manner, permitted // by this License, of making, using, or selling its contributor version, // but do not include claims that would be infringed only as a // consequence of further modification of the contributor version. For // purposes of this definition, "control" includes the right to grant // patent sublicenses in a manner consistent with the requirements of // this License. 
// // Each contributor grants you a non-exclusive, worldwide, royalty-free // patent license under the contributor's essential patent claims, to // make, use, sell, offer for sale, import and otherwise run, modify and // propagate the contents of its contributor version. // // In the following three paragraphs, a "patent license" is any express // agreement or commitment, however denominated, not to enforce a patent // (such as an express permission to practice a patent or covenant not to // sue for patent infringement). To "grant" such a patent license to a // party means to make such an agreement or commitment not to enforce a // patent against the party. // // If you convey a covered work, knowingly relying on a patent license, // and the Corresponding Source of the work is not available for anyone // to copy, free of charge and under the terms of this License, through a // publicly available network server or other readily accessible means, // then you must either (1) cause the Corresponding Source to be so // available, or (2) arrange to deprive yourself of the benefit of the // patent license for this particular work, or (3) arrange, in a manner // consistent with the requirements of this License, to extend the patent // license to downstream recipients. "Knowingly relying" means you have // actual knowledge that, but for the patent license, your conveying the // covered work in a country, or your recipient's use of the covered work // in a country, would infringe one or more identifiable patents in that // country that you have reason to believe are valid. 
// // If, pursuant to or in connection with a single transaction or // arrangement, you convey, or propagate by procuring conveyance of, a // covered work, and grant a patent license to some of the parties // receiving the covered work authorizing them to use, propagate, modify // or convey a specific copy of the covered work, then the patent license // you grant is automatically extended to all recipients of the covered // work and works based on it. // // A patent license is "discriminatory" if it does not include within // the scope of its coverage, prohibits the exercise of, or is // conditioned on the non-exercise of one or more of the rights that are // specifically granted under this License. You may not convey a covered // work if you are a party to an arrangement with a third party that is // in the business of distributing software, under which you make payment // to the third party based on the extent of your activity of conveying // the work, and under which the third party grants, to any of the // parties who would receive the covered work from you, a discriminatory // patent license (a) in connection with copies of the covered work // conveyed by you (or copies made from those copies), or (b) primarily // for and in connection with specific products or compilations that // contain the covered work, unless you entered into that arrangement, // or that patent license was granted, prior to 28 March 2007. // // Nothing in this License shall be construed as excluding or limiting // any implied license or other defenses to infringement that may // otherwise be available to you under applicable patent law. // // 12. No Surrender of Others' Freedom. // // If conditions are imposed on you (whether by court order, agreement or // otherwise) that contradict the conditions of this License, they do not // excuse you from the conditions of this License. 
If you cannot convey a // covered work so as to satisfy simultaneously your obligations under this // License and any other pertinent obligations, then as a consequence you may // not convey it at all. For example, if you agree to terms that obligate you // to collect a royalty for further conveying from those to whom you convey // the Program, the only way you could satisfy both those terms and this // License would be to refrain entirely from conveying the Program. // // 13. Use with the GNU Affero General Public License. // // Notwithstanding any other provision of this License, you have // permission to link or combine any covered work with a work licensed // under version 3 of the GNU Affero General Public License into a single // combined work, and to convey the resulting work. The terms of this // License will continue to apply to the part which is the covered work, // but the special requirements of the GNU Affero General Public License, // section 13, concerning interaction through a network will apply to the // combination as such. // // 14. Revised Versions of this License. // // The Free Software Foundation may publish revised and/or new versions of // the GNU General Public License from time to time. Such new versions will // be similar in spirit to the present version, but may differ in detail to // address new problems or concerns. // // Each version is given a distinguishing version number. If the // Program specifies that a certain numbered version of the GNU General // Public License "or any later version" applies to it, you have the // option of following the terms and conditions either of that numbered // version or of any later version published by the Free Software // Foundation. If the Program does not specify a version number of the // GNU General Public License, you may choose any version ever published // by the Free Software Foundation. 
// // If the Program specifies that a proxy can decide which future // versions of the GNU General Public License can be used, that proxy's // public statement of acceptance of a version permanently authorizes you // to choose that version for the Program. // // Later license versions may give you additional or different // permissions. However, no additional obligations are imposed on any // author or copyright holder as a result of your choosing to follow a // later version. // // 15. Disclaimer of Warranty. // // THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY // APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT // HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY // OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM // IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF // ALL NECESSARY SERVICING, REPAIR OR CORRECTION. // // 16. Limitation of Liability. // // IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING // WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS // THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY // GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE // USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF // DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD // PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), // EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF // SUCH DAMAGES. // // 17. Interpretation of Sections 15 and 16. 
// // If the disclaimer of warranty and limitation of liability provided // above cannot be given local legal effect according to their terms, // reviewing courts shall apply local law that most closely approximates // an absolute waiver of all civil liability in connection with the // Program, unless a warranty or assumption of liability accompanies a // copy of the Program in return for a fee. // // END OF TERMS AND CONDITIONS const char* license=" /************************************************************************************\n \ * EOS - the CERN Disk Storage System *\n \ * Copyright (C) 2011 CERN/Switzerland *\n \ * *\n \ * This program is free software: you can redistribute it and/or modify *\n \ * it under the terms of the GNU General Public License as published by *\n \ * the Free Software Foundation, either version 3 of the License, or *\n \ * (at your option) any later version. *\n \ * *\n \ * This program is distributed in the hope that it will be useful, *\n \ * but WITHOUT ANY WARRANTY; without even the implied warranty of *\n \ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *\n \ * GNU General Public License for more details. *\n \ * *\n \ * EOS is based on the XRootD software: *\n \ * ----------------------------------------------------------------------------------*\n \ * Copyright (C) 2005-2010, Board of Trustees of the Leland Stanford, Jr. University.*\n \ * Produced under contract DE-AC02-76-SF00515 with the US Department of Energy. *\n \ * All rights reserved. *\n \ * See for more details. *\n \ * *\n \ * EOS uses crc32c checksum alogrithms from MIT/Intel: *\n \ * ----------------------------------------------------------------------------------*\n \ * Copyright 2008,2009,2010 Massachusetts Institute of Technology. 
*\n \ * Implementations adapted from Intel's Slicing By 8 Sourceforge Project *\n \ * http://sourceforge.net/projects/slicing-by-8/ *\n \ * Copyright (c) 2004-2006 Intel Corporation *\n \ ************************************************************************************/\n \ \n \ GNU GENERAL PUBLIC LICENSE\n \ Version 3, 29 June 2007\n \ \n \ Copyright (C) 2007 Free Software Foundation, Inc. \n \ Everyone is permitted to copy and distribute verbatim copies\n \ of this license document, but changing it is not allowed.\n \ \n \ Preamble\n \ \n \ The GNU General Public License is a free, copyleft license for\n \ software and other kinds of works.\n \ \n \ The licenses for most software and other practical works are designed\n \ to take away your freedom to share and change the works. By contrast,\n \ the GNU General Public License is intended to guarantee your freedom to\n \ share and change all versions of a program--to make sure it remains free\n \ software for all its users. We, the Free Software Foundation, use the\n \ GNU General Public License for most of our software; it applies also to\n \ any other work released this way by its authors. You can apply it to\n \ your programs, too.\n \ \n \ When we speak of free software, we are referring to freedom, not\n \ price. Our General Public Licenses are designed to make sure that you\n \ have the freedom to distribute copies of free software (and charge for\n \ them if you wish), that you receive source code or can get it if you\n \ want it, that you can change the software or use pieces of it in new\n \ free programs, and that you know you can do these things.\n \ \n \ To protect your rights, we need to prevent others from denying you\n \ these rights or asking you to surrender the rights. 
Therefore, you have\n \ certain responsibilities if you distribute copies of the software, or if\n \ you modify it: responsibilities to respect the freedom of others.\n \ \n \ For example, if you distribute copies of such a program, whether\n \ gratis or for a fee, you must pass on to the recipients the same\n \ freedoms that you received. You must make sure that they, too, receive\n \ or can get the source code. And you must show them these terms so they\n \ know their rights.\n \ \n \ Developers that use the GNU GPL protect your rights with two steps:\n \ (1) assert copyright on the software, and (2) offer you this License\n \ giving you legal permission to copy, distribute and/or modify it.\n \ \n \ For the developers' and authors' protection, the GPL clearly explains\n \ that there is no warranty for this free software. For both users' and\n \ authors' sake, the GPL requires that modified versions be marked as\n \ changed, so that their problems will not be attributed erroneously to\n \ authors of previous versions.\n \ \n \ Some devices are designed to deny users access to install or run\n \ modified versions of the software inside them, although the manufacturer\n \ can do so. This is fundamentally incompatible with the aim of\n \ protecting users' freedom to change the software. The systematic\n \ pattern of such abuse occurs in the area of products for individuals to\n \ use, which is precisely where it is most unacceptable. Therefore, we\n \ have designed this version of the GPL to prohibit the practice for those\n \ products. 
If such problems arise substantially in other domains, we\n \ stand ready to extend this provision to those domains in future versions\n \ of the GPL, as needed to protect the freedom of users.\n \ \n \ Finally, every program is threatened constantly by software patents.\n \ States should not allow patents to restrict development and use of\n \ software on general-purpose computers, but in those that do, we wish to\n \ avoid the special danger that patents applied to a free program could\n \ make it effectively proprietary. To prevent this, the GPL assures that\n \ patents cannot be used to render the program non-free.\n \ \n \ The precise terms and conditions for copying, distribution and\n \ modification follow.\n \ \n \ TERMS AND CONDITIONS\n \ \n \ 0. Definitions.\n \ \n \ \"This License\" refers to version 3 of the GNU General Public License.\n \ \n \ \"Copyright\" also means copyright-like laws that apply to other kinds of\n \ works, such as semiconductor masks.\n \ \n \ \"The Program\" refers to any copyrightable work licensed under this\n \ License. Each licensee is addressed as \"you\". \"Licensees\" and\n \ \"recipients\" may be individuals or organizations.\n \ \n \ To \"modify\" a work means to copy from or adapt all or part of the work\n \ in a fashion requiring copyright permission, other than the making of an\n \ exact copy. The resulting work is called a \"modified version\" of the\n \ earlier work or a work \"based on\" the earlier work.\n \ \n \ A \"covered work\" means either the unmodified Program or a work based\n \ on the Program.\n \ \n \ To \"propagate\" a work means to do anything with it that, without\n \ permission, would make you directly or secondarily liable for\n \ infringement under applicable copyright law, except executing it on a\n \ computer or modifying a private copy. 
Propagation includes copying,\n \ distribution (with or without modification), making available to the\n \ public, and in some countries other activities as well.\n \ \n \ To \"convey\" a work means any kind of propagation that enables other\n \ parties to make or receive copies. Mere interaction with a user through\n \ a computer network, with no transfer of a copy, is not conveying.\n \ \n \ An interactive user interface displays \"Appropriate Legal Notices\"\n \ to the extent that it includes a convenient and prominently visible\n \ feature that (1) displays an appropriate copyright notice, and (2)\n \ tells the user that there is no warranty for the work (except to the\n \ extent that warranties are provided), that licensees may convey the\n \ work under this License, and how to view a copy of this License. If\n \ the interface presents a list of user commands or options, such as a\n \ menu, a prominent item in the list meets this criterion.\n \ \n \ 1. Source Code.\n \ \n \ The \"source code\" for a work means the preferred form of the work\n \ for making modifications to it. \"Object code\" means any non-source\n \ form of a work.\n \ \n \ A \"Standard Interface\" means an interface that either is an official\n \ standard defined by a recognized standards body, or, in the case of\n \ interfaces specified for a particular programming language, one that\n \ is widely used among developers working in that language.\n \ \n \ The \"System Libraries\" of an executable work include anything, other\n \ than the work as a whole, that (a) is included in the normal form of\n \ packaging a Major Component, but which is not part of that Major\n \ Component, and (b) serves only to enable use of the work with that\n \ Major Component, or to implement a Standard Interface for which an\n \ implementation is available to the public in source code form. 
A\n \ \"Major Component\", in this context, means a major essential component\n \ (kernel, window system, and so on) of the specific operating system\n \ (if any) on which the executable work runs, or a compiler used to\n \ produce the work, or an object code interpreter used to run it.\n \ \n \ The \"Corresponding Source\" for a work in object code form means all\n \ the source code needed to generate, install, and (for an executable\n \ work) run the object code and to modify the work, including scripts to\n \ control those activities. However, it does not include the work's\n \ System Libraries, or general-purpose tools or generally available free\n \ programs which are used unmodified in performing those activities but\n \ which are not part of the work. For example, Corresponding Source\n \ includes interface definition files associated with source files for\n \ the work, and the source code for shared libraries and dynamically\n \ linked subprograms that the work is specifically designed to require,\n \ such as by intimate data communication or control flow between those\n \ subprograms and other parts of the work.\n \ \n \ The Corresponding Source need not include anything that users\n \ can regenerate automatically from other parts of the Corresponding\n \ Source.\n \ \n \ The Corresponding Source for a work in source code form is that\n \ same work.\n \ \n \ 2. Basic Permissions.\n \ \n \ All rights granted under this License are granted for the term of\n \ copyright on the Program, and are irrevocable provided the stated\n \ conditions are met. This License explicitly affirms your unlimited\n \ permission to run the unmodified Program. The output from running a\n \ covered work is covered by this License only if the output, given its\n \ content, constitutes a covered work. 
This License acknowledges your\n \ rights of fair use or other equivalent, as provided by copyright law.\n \ \n \ You may make, run and propagate covered works that you do not\n \ convey, without conditions so long as your license otherwise remains\n \ in force. You may convey covered works to others for the sole purpose\n \ of having them make modifications exclusively for you, or provide you\n \ with facilities for running those works, provided that you comply with\n \ the terms of this License in conveying all material for which you do\n \ not control copyright. Those thus making or running the covered works\n \ for you must do so exclusively on your behalf, under your direction\n \ and control, on terms that prohibit them from making any copies of\n \ your copyrighted material outside their relationship with you.\n \ \n \ Conveying under any other circumstances is permitted solely under\n \ the conditions stated below. Sublicensing is not allowed; section 10\n \ makes it unnecessary.\n \ \n \ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n \ \n \ No covered work shall be deemed part of an effective technological\n \ measure under any applicable law fulfilling obligations under article\n \ 11 of the WIPO copyright treaty adopted on 20 December 1996, or\n \ similar laws prohibiting or restricting circumvention of such\n \ measures.\n \ \n \ When you convey a covered work, you waive any legal power to forbid\n \ circumvention of technological measures to the extent such circumvention\n \ is effected by exercising rights under this License with respect to\n \ the covered work, and you disclaim any intention to limit operation or\n \ modification of the work as a means of enforcing, against the work's\n \ users, your or third parties' legal rights to forbid circumvention of\n \ technological measures.\n \ \n \ 4. 
Conveying Verbatim Copies.\n \ \n \ You may convey verbatim copies of the Program's source code as you\n \ receive it, in any medium, provided that you conspicuously and\n \ appropriately publish on each copy an appropriate copyright notice;\n \ keep intact all notices stating that this License and any\n \ non-permissive terms added in accord with section 7 apply to the code;\n \ keep intact all notices of the absence of any warranty; and give all\n \ recipients a copy of this License along with the Program.\n \ \n \ You may charge any price or no price for each copy that you convey,\n \ and you may offer support or warranty protection for a fee.\n \ \n \ 5. Conveying Modified Source Versions.\n \ \n \ You may convey a work based on the Program, or the modifications to\n \ produce it from the Program, in the form of source code under the\n \ terms of section 4, provided that you also meet all of these conditions:\n \ \n \ a) The work must carry prominent notices stating that you modified\n \ it, and giving a relevant date.\n \ \n \ b) The work must carry prominent notices stating that it is\n \ released under this License and any conditions added under section\n \ 7. This requirement modifies the requirement in section 4 to\n \ \"keep intact all notices\".\n \ \n \ c) You must license the entire work, as a whole, under this\n \ License to anyone who comes into possession of a copy. This\n \ License will therefore apply, along with any applicable section 7\n \ additional terms, to the whole of the work, and all its parts,\n \ regardless of how they are packaged. 
This License gives no\n \ permission to license the work in any other way, but it does not\n \ invalidate such permission if you have separately received it.\n \ \n \ d) If the work has interactive user interfaces, each must display\n \ Appropriate Legal Notices; however, if the Program has interactive\n \ interfaces that do not display Appropriate Legal Notices, your\n \ work need not make them do so.\n \ \n \ A compilation of a covered work with other separate and independent\n \ works, which are not by their nature extensions of the covered work,\n \ and which are not combined with it such as to form a larger program,\n \ in or on a volume of a storage or distribution medium, is called an\n \ \"aggregate\" if the compilation and its resulting copyright are not\n \ used to limit the access or legal rights of the compilation's users\n \ beyond what the individual works permit. Inclusion of a covered work\n \ in an aggregate does not cause this License to apply to the other\n \ parts of the aggregate.\n \ \n \ 6. 
Conveying Non-Source Forms.\n \ \n \ You may convey a covered work in object code form under the terms\n \ of sections 4 and 5, provided that you also convey the\n \ machine-readable Corresponding Source under the terms of this License,\n \ in one of these ways:\n \ \n \ a) Convey the object code in, or embodied in, a physical product\n \ (including a physical distribution medium), accompanied by the\n \ Corresponding Source fixed on a durable physical medium\n \ customarily used for software interchange.\n \ \n \ b) Convey the object code in, or embodied in, a physical product\n \ (including a physical distribution medium), accompanied by a\n \ written offer, valid for at least three years and valid for as\n \ long as you offer spare parts or customer support for that product\n \ model, to give anyone who possesses the object code either (1) a\n \ copy of the Corresponding Source for all the software in the\n \ product that is covered by this License, on a durable physical\n \ medium customarily used for software interchange, for a price no\n \ more than your reasonable cost of physically performing this\n \ conveying of source, or (2) access to copy the\n \ Corresponding Source from a network server at no charge.\n \ \n \ c) Convey individual copies of the object code with a copy of the\n \ written offer to provide the Corresponding Source. This\n \ alternative is allowed only occasionally and noncommercially, and\n \ only if you received the object code with such an offer, in accord\n \ with subsection 6b.\n \ \n \ d) Convey the object code by offering access from a designated\n \ place (gratis or for a charge), and offer equivalent access to the\n \ Corresponding Source in the same way through the same place at no\n \ further charge. You need not require recipients to copy the\n \ Corresponding Source along with the object code. 
If the place to\n \ copy the object code is a network server, the Corresponding Source\n \ may be on a different server (operated by you or a third party)\n \ that supports equivalent copying facilities, provided you maintain\n \ clear directions next to the object code saying where to find the\n \ Corresponding Source. Regardless of what server hosts the\n \ Corresponding Source, you remain obligated to ensure that it is\n \ available for as long as needed to satisfy these requirements.\n \ \n \ e) Convey the object code using peer-to-peer transmission, provided\n \ you inform other peers where the object code and Corresponding\n \ Source of the work are being offered to the general public at no\n \ charge under subsection 6d.\n \ \n \ A separable portion of the object code, whose source code is excluded\n \ from the Corresponding Source as a System Library, need not be\n \ included in conveying the object code work.\n \ \n \ A \"User Product\" is either (1) a \"consumer product\", which means any\n \ tangible personal property which is normally used for personal, family,\n \ or household purposes, or (2) anything designed or sold for incorporation\n \ into a dwelling. In determining whether a product is a consumer product,\n \ doubtful cases shall be resolved in favor of coverage. For a particular\n \ product received by a particular user, \"normally used\" refers to a\n \ typical or common use of that class of product, regardless of the status\n \ of the particular user or of the way in which the particular user\n \ actually uses, or expects or is expected to use, the product. 
A product\n \ is a consumer product regardless of whether the product has substantial\n \ commercial, industrial or non-consumer uses, unless such uses represent\n \ the only significant mode of use of the product.\n \ \n \ \"Installation Information\" for a User Product means any methods,\n \ procedures, authorization keys, or other information required to install\n \ and execute modified versions of a covered work in that User Product from\n \ a modified version of its Corresponding Source. The information must\n \ suffice to ensure that the continued functioning of the modified object\n \ code is in no case prevented or interfered with solely because\n \ modification has been made.\n \ \n \ If you convey an object code work under this section in, or with, or\n \ specifically for use in, a User Product, and the conveying occurs as\n \ part of a transaction in which the right of possession and use of the\n \ User Product is transferred to the recipient in perpetuity or for a\n \ fixed term (regardless of how the transaction is characterized), the\n \ Corresponding Source conveyed under this section must be accompanied\n \ by the Installation Information. But this requirement does not apply\n \ if neither you nor any third party retains the ability to install\n \ modified object code on the User Product (for example, the work has\n \ been installed in ROM).\n \ \n \ The requirement to provide Installation Information does not include a\n \ requirement to continue to provide support service, warranty, or updates\n \ for a work that has been modified or installed by the recipient, or for\n \ the User Product in which it has been modified or installed. 
Access to a\n \ network may be denied when the modification itself materially and\n \ adversely affects the operation of the network or violates the rules and\n \ protocols for communication across the network.\n \ \n \ Corresponding Source conveyed, and Installation Information provided,\n \ in accord with this section must be in a format that is publicly\n \ documented (and with an implementation available to the public in\n \ source code form), and must require no special password or key for\n \ unpacking, reading or copying.\n \ \n \ 7. Additional Terms.\n \ \n \ \"Additional permissions\" are terms that supplement the terms of this\n \ License by making exceptions from one or more of its conditions.\n \ Additional permissions that are applicable to the entire Program shall\n \ be treated as though they were included in this License, to the extent\n \ that they are valid under applicable law. If additional permissions\n \ apply only to part of the Program, that part may be used separately\n \ under those permissions, but the entire Program remains governed by\n \ this License without regard to the additional permissions.\n \ \n \ When you convey a copy of a covered work, you may at your option\n \ remove any additional permissions from that copy, or from any part of\n \ it. (Additional permissions may be written to require their own\n \ removal in certain cases when you modify the work.) 
You may place\n \ additional permissions on material, added by you to a covered work,\n \ for which you have or can give appropriate copyright permission.\n \ \n \ Notwithstanding any other provision of this License, for material you\n \ add to a covered work, you may (if authorized by the copyright holders of\n \ that material) supplement the terms of this License with terms:\n \ \n \ a) Disclaiming warranty or limiting liability differently from the\n \ terms of sections 15 and 16 of this License; or\n \ \n \ b) Requiring preservation of specified reasonable legal notices or\n \ author attributions in that material or in the Appropriate Legal\n \ Notices displayed by works containing it; or\n \ \n \ c) Prohibiting misrepresentation of the origin of that material, or\n \ requiring that modified versions of such material be marked in\n \ reasonable ways as different from the original version; or\n \ \n \ d) Limiting the use for publicity purposes of names of licensors or\n \ authors of the material; or\n \ \n \ e) Declining to grant rights under trademark law for use of some\n \ trade names, trademarks, or service marks; or\n \ \n \ f) Requiring indemnification of licensors and authors of that\n \ material by anyone who conveys the material (or modified versions of\n \ it) with contractual assumptions of liability to the recipient, for\n \ any liability that these contractual assumptions directly impose on\n \ those licensors and authors.\n \ \n \ All other non-permissive additional terms are considered \"further\n \ restrictions\" within the meaning of section 10. If the Program as you\n \ received it, or any part of it, contains a notice stating that it is\n \ governed by this License along with a term that is a further\n \ restriction, you may remove that term. 
If a license document contains\n \ a further restriction but permits relicensing or conveying under this\n \ License, you may add to a covered work material governed by the terms\n \ of that license document, provided that the further restriction does\n \ not survive such relicensing or conveying.\n \ \n \ If you add terms to a covered work in accord with this section, you\n \ must place, in the relevant source files, a statement of the\n \ additional terms that apply to those files, or a notice indicating\n \ where to find the applicable terms.\n \ \n \ Additional terms, permissive or non-permissive, may be stated in the\n \ form of a separately written license, or stated as exceptions;\n \ the above requirements apply either way.\n \ \n \ 8. Termination.\n \ \n \ You may not propagate or modify a covered work except as expressly\n \ provided under this License. Any attempt otherwise to propagate or\n \ modify it is void, and will automatically terminate your rights under\n \ this License (including any patent licenses granted under the third\n \ paragraph of section 11).\n \ \n \ However, if you cease all violation of this License, then your\n \ license from a particular copyright holder is reinstated (a)\n \ provisionally, unless and until the copyright holder explicitly and\n \ finally terminates your license, and (b) permanently, if the copyright\n \ holder fails to notify you of the violation by some reasonable means\n \ prior to 60 days after the cessation.\n \ \n \ Moreover, your license from a particular copyright holder is\n \ reinstated permanently if the copyright holder notifies you of the\n \ violation by some reasonable means, this is the first time you have\n \ received notice of violation of this License (for any work) from that\n \ copyright holder, and you cure the violation prior to 30 days after\n \ your receipt of the notice.\n \ \n \ Termination of your rights under this section does not terminate the\n \ licenses of parties who have received 
copies or rights from you under\n \ this License. If your rights have been terminated and not permanently\n \ reinstated, you do not qualify to receive new licenses for the same\n \ material under section 10.\n \ \n \ 9. Acceptance Not Required for Having Copies.\n \ \n \ You are not required to accept this License in order to receive or\n \ run a copy of the Program. Ancillary propagation of a covered work\n \ occurring solely as a consequence of using peer-to-peer transmission\n \ to receive a copy likewise does not require acceptance. However,\n \ nothing other than this License grants you permission to propagate or\n \ modify any covered work. These actions infringe copyright if you do\n \ not accept this License. Therefore, by modifying or propagating a\n \ covered work, you indicate your acceptance of this License to do so.\n \ \n \ 10. Automatic Licensing of Downstream Recipients.\n \ \n \ Each time you convey a covered work, the recipient automatically\n \ receives a license from the original licensors, to run, modify and\n \ propagate that work, subject to this License. You are not responsible\n \ for enforcing compliance by third parties with this License.\n \ \n \ An \"entity transaction\" is a transaction transferring control of an\n \ organization, or substantially all assets of one, or subdividing an\n \ organization, or merging organizations. If propagation of a covered\n \ work results from an entity transaction, each party to that\n \ transaction who receives a copy of the work also receives whatever\n \ licenses to the work the party's predecessor in interest had or could\n \ give under the previous paragraph, plus a right to possession of the\n \ Corresponding Source of the work from the predecessor in interest, if\n \ the predecessor has it or can get it with reasonable efforts.\n \ \n \ You may not impose any further restrictions on the exercise of the\n \ rights granted or affirmed under this License. 
For example, you may\n \ not impose a license fee, royalty, or other charge for exercise of\n \ rights granted under this License, and you may not initiate litigation\n \ (including a cross-claim or counterclaim in a lawsuit) alleging that\n \ any patent claim is infringed by making, using, selling, offering for\n \ sale, or importing the Program or any portion of it.\n \ \n \ 11. Patents.\n \ \n \ A \"contributor\" is a copyright holder who authorizes use under this\n \ License of the Program or a work on which the Program is based. The\n \ work thus licensed is called the contributor's \"contributor version\".\n \ \n \ A contributor's \"essential patent claims\" are all patent claims\n \ owned or controlled by the contributor, whether already acquired or\n \ hereafter acquired, that would be infringed by some manner, permitted\n \ by this License, of making, using, or selling its contributor version,\n \ but do not include claims that would be infringed only as a\n \ consequence of further modification of the contributor version. For\n \ purposes of this definition, \"control\" includes the right to grant\n \ patent sublicenses in a manner consistent with the requirements of\n \ this License.\n \ \n \ Each contributor grants you a non-exclusive, worldwide, royalty-free\n \ patent license under the contributor's essential patent claims, to\n \ make, use, sell, offer for sale, import and otherwise run, modify and\n \ propagate the contents of its contributor version.\n \ \n \ In the following three paragraphs, a \"patent license\" is any express\n \ agreement or commitment, however denominated, not to enforce a patent\n \ (such as an express permission to practice a patent or covenant not to\n \ sue for patent infringement). 
To \"grant\" such a patent license to a\n \ party means to make such an agreement or commitment not to enforce a\n \ patent against the party.\n \ \n \ If you convey a covered work, knowingly relying on a patent license,\n \ and the Corresponding Source of the work is not available for anyone\n \ to copy, free of charge and under the terms of this License, through a\n \ publicly available network server or other readily accessible means,\n \ then you must either (1) cause the Corresponding Source to be so\n \ available, or (2) arrange to deprive yourself of the benefit of the\n \ patent license for this particular work, or (3) arrange, in a manner\n \ consistent with the requirements of this License, to extend the patent\n \ license to downstream recipients. \"Knowingly relying\" means you have\n \ actual knowledge that, but for the patent license, your conveying the\n \ covered work in a country, or your recipient's use of the covered work\n \ in a country, would infringe one or more identifiable patents in that\n \ country that you have reason to believe are valid.\n \ \n \ If, pursuant to or in connection with a single transaction or\n \ arrangement, you convey, or propagate by procuring conveyance of, a\n \ covered work, and grant a patent license to some of the parties\n \ receiving the covered work authorizing them to use, propagate, modify\n \ or convey a specific copy of the covered work, then the patent license\n \ you grant is automatically extended to all recipients of the covered\n \ work and works based on it.\n \ \n \ A patent license is \"discriminatory\" if it does not include within\n \ the scope of its coverage, prohibits the exercise of, or is\n \ conditioned on the non-exercise of one or more of the rights that are\n \ specifically granted under this License. 
You may not convey a covered\n \ work if you are a party to an arrangement with a third party that is\n \ in the business of distributing software, under which you make payment\n \ to the third party based on the extent of your activity of conveying\n \ the work, and under which the third party grants, to any of the\n \ parties who would receive the covered work from you, a discriminatory\n \ patent license (a) in connection with copies of the covered work\n \ conveyed by you (or copies made from those copies), or (b) primarily\n \ for and in connection with specific products or compilations that\n \ contain the covered work, unless you entered into that arrangement,\n \ or that patent license was granted, prior to 28 March 2007.\n \ \n \ Nothing in this License shall be construed as excluding or limiting\n \ any implied license or other defenses to infringement that may\n \ otherwise be available to you under applicable patent law.\n \ \n \ 12. No Surrender of Others' Freedom.\n \ \n \ If conditions are imposed on you (whether by court order, agreement or\n \ otherwise) that contradict the conditions of this License, they do not\n \ excuse you from the conditions of this License. If you cannot convey a\n \ covered work so as to satisfy simultaneously your obligations under this\n \ License and any other pertinent obligations, then as a consequence you may\n \ not convey it at all. For example, if you agree to terms that obligate you\n \ to collect a royalty for further conveying from those to whom you convey\n \ the Program, the only way you could satisfy both those terms and this\n \ License would be to refrain entirely from conveying the Program.\n \ \n \ 13. 
Use with the GNU Affero General Public License.\n \ \n \ Notwithstanding any other provision of this License, you have\n \ permission to link or combine any covered work with a work licensed\n \ under version 3 of the GNU Affero General Public License into a single\n \ combined work, and to convey the resulting work. The terms of this\n \ License will continue to apply to the part which is the covered work,\n \ but the special requirements of the GNU Affero General Public License,\n \ section 13, concerning interaction through a network will apply to the\n \ combination as such.\n \ \n \ 14. Revised Versions of this License.\n \ \n \ The Free Software Foundation may publish revised and/or new versions of\n \ the GNU General Public License from time to time. Such new versions will\n \ be similar in spirit to the present version, but may differ in detail to\n \ address new problems or concerns.\n \ \n \ Each version is given a distinguishing version number. If the\n \ Program specifies that a certain numbered version of the GNU General\n \ Public License \"or any later version\" applies to it, you have the\n \ option of following the terms and conditions either of that numbered\n \ version or of any later version published by the Free Software\n \ Foundation. If the Program does not specify a version number of the\n \ GNU General Public License, you may choose any version ever published\n \ by the Free Software Foundation.\n \ \n \ If the Program specifies that a proxy can decide which future\n \ versions of the GNU General Public License can be used, that proxy's\n \ public statement of acceptance of a version permanently authorizes you\n \ to choose that version for the Program.\n \ \n \ Later license versions may give you additional or different\n \ permissions. However, no additional obligations are imposed on any\n \ author or copyright holder as a result of your choosing to follow a\n \ later version.\n \ \n \ 15. 
Disclaimer of Warranty.\n \ \n \ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\n \ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\n \ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\n \ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\n \ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n \ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\n \ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\n \ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n \ \n \ 16. Limitation of Liability.\n \ \n \ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\n \ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\n \ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\n \ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\n \ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\n \ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\n \ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\n \ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\n \ SUCH DAMAGES.\n \ \n \ 17. 
Interpretation of Sections 15 and 16.\n \ \n \ If the disclaimer of warranty and limitation of liability provided\n \ above cannot be given local legal effect according to their terms,\n \ reviewing courts shall apply local law that most closely approximates\n \ an absolute waiver of all civil liability in connection with the\n \ Program, unless a warranty or assumption of liability accompanies a\n \ copy of the Program in return for a fee.\n \ \n \ END OF TERMS AND CONDITIONS\n \ \n"; ================================================ FILE: README.md ================================================ [![build status](https://gitlab.cern.ch/dss/eos/badges/master/build.svg)](https://gitlab.cern.ch/dss/eos/commits/master) # EOS ## Description **EOS** is a software solution that aims to provide fast and reliable multi-PB disk-only storage technology for both LHC and non-LHC use-cases at CERN. The core of the implementation is the XRootD framework which provides feature-rich remote access protocol. The storage system is running on commodity hardware with disks in JBOD configuration. It is written mostly in C/C++, with some of the extra modules in Python. Files can be accessed via native **XRootD** protocol, a **POSIX-like FUSE** client or **HTTP(S) & WebDav** protocol. ## Documentation The most up-to-date documentation can be found at: [eos-docs.web.cern.ch/eos-docs](http://eos-docs.web.cern.ch/eos-docs/) You will need to install Sphinx, Doxygen and the `solar_theme` (for Sphinx) in order to generate the docs. For up-to-date information on getting Sphinx refer to the [Sphinx docs](https://www.sphinx-doc.org/en/master/usage/installation.html). 
```bash ## RHEL instructions # Please choose the relevant python version based on the distro sudo yum install python-sphinx doxygen pip install solar_theme ## Ubuntu 20.04 instructions sudo apt install python3-sphinx doxygen pip3 install solar_theme ``` **Doxygen** documentation of the API is available in the `./doc` directory and can be generated using the following command: ```bash # Inside the EOS git clone directory cd doc doxygen .... # Documentation generated in the ./html directory, viewable with any browser: # file:///eos_git_clone_dir/doc/html/index.html ``` **Sphinx** documentation of installation and application is also in the `./doc` directory. This is what is published at https://eos-docs.web.cern.ch. Documentation can be generated using: ```bash cd doc make html # Documentation can be found in build/html/index.html (view in a browser). # The make interface supports other targets (e.g. latexpdf). ``` ## Project directory structure - `archive/`: Archive tool implementation in Python - `auth_plugin/`: Authorization delegation plugin - `authz/`: Authorization capability functionality - `client/`: gRPC clients - `cmake/`: CMake scripts and functions - `common/`: Common helper files and classes - `console/`: Command line client implementation - `coverage/`: Test coverage config for LCOV - `doc/`: Doxygen and Sphinx documentation - `etc/`: Log rotation files - `fst/`: The Storage Server Plugin (FST) - `fusex/`: Next generation bi-directional FUSE mount client with high-end features - `man/`: Manual pages - `mgm/`: Metadata Namespace and Scheduling Redirector Plugin (MGM) - `misc/`: systemd, sysconfig and service scripts - `mq/`: Message Queue server plugin - `namespace/`: Namespace implementation - `nginx/`: Nginx patches for EOS integration - `proto/`: Protobuf definitions for various components - `test/`: Instance test scripts and dedicated test executables - `unit_tests/`: Unit tests for individual modules - `utils/`: Utilities and uninstall scripts ## 
Git submodules Some components are maintained in separate upstream repositories and brought in as git submodules. Make sure submodules are initialized and kept up-to-date: ```bash git submodule update --init --recursive # To refresh later git submodule update --recursive --remote ``` Submodules currently used: - `quarkdb/`: QuarkDB client/server sources used by MGM for QuarkDB-backed services (e.g., QDB master, metadata/services that rely on QuarkDB). - `common/xrootd-ssi-protobuf-interface/`: XRootD SSI + Protobuf interface headers used by EOS gRPC/SSI integrations and CTA-related workflows. Tip: See `.gitmodules` for the authoritative list and remote URLs. ## Dependencies Use the EOS Diopside dependency repository. Follow the official installation instructions here: [EOS Diopside Manual – Installation](https://eos-docs.web.cern.ch/diopside/manual/hardware-installation.html#installation). ```bash yum install -y git gcc cmake cmake3 readline readline-devel fuse fuse-devel \ leveldb leveldb-devel binutils-devel zlib zlib-devel zlib-static \ bzip2 bzip2-devel libattr libattr-devel libuuid libuuid-devel \ xfsprogs xfsprogs-devel sparsehash-devel e2fsprogs e2fsprogs-devel \ openssl openssl-devel openssl-static eos-folly eos-rocksdb ncurses \ ncurses-devel ncurses-static protobuf3-devel openldap-devel \ hiredis-devel zeromq-devel jsoncpp-devel xrootd xrootd-server-devel \ xrootd-client-devel xrootd-private-devel cppzmq-devel libcurl-devel \ libevent-devel jemalloc jemalloc-devel ``` ## Build To build **EOS**, you need **gcc (>=7)** with **C++17 features** and **CMake** installed on your system. If you can install ninja, **EOS** supports ninja for builds. ```bash git submodule update --init --recursive # Create build workdir mkdir build-with-ninja cd build-with-ninja # Run CMake (pass -DCLIENT=1 if you only need the client binaries) cmake3 -GNinja .. 
# Build
ninja -j 4
```

Otherwise, standard Makefile builds are of course possible:

```bash
git submodule update --init --recursive

# Create build workdir
mkdir build
cd build

# Run CMake (pass -DCLIENT=1 if you only need the client binaries)
cmake3 ..

# Build
make -j 4
```

## Install/Uninstall

The default behaviour is to install **EOS** at system level using
`CMAKE_INSTALL_PREFIX=/usr`. To change the default install prefix path,
do the following:

```bash
# Modify the default install path
cmake ../ -DCMAKE_INSTALL_PREFIX=/other_path

# if using ninja
ninja install

# Uninstall
ninja uninstall

# Install - might require sudo privileges
make install

# Uninstall
make uninstall
```

## Source/Binary RPM Generation

To build the source/binary RPMs run:

```bash
# Create source tarball
make dist

# Create Source RPM
make srpm

# Create RPM
make rpm
```

## Bug Reporting

You can send **EOS** bug reports to <project-eos@cern.ch>. The preferable
way, if you have access, is to use the online bug tracking system [Jira][2]
to submit new problem reports or search for existing ones:
https://its.cern.ch/jira/browse/EOS

## EOS Community

For discussions and help, there is also the eos community which brings
together users, developers & collaborators at
https://eos-community.web.cern.ch/

## Licence

**EOS - The CERN Disk Storage System**
**Copyright (C) 2025 CERN/Switzerland**

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
[1]: http://eos-docs.web.cern.ch/eos-docs/quickstart/setup_repo.html#eos-base-setup-repos [2]: https://its.cern.ch/jira/secure/Dashboard.jspa ================================================ FILE: archive/CMakeLists.txt ================================================ # ---------------------------------------------------------------------- # File: CMakeLists.txt # Author: Elvin-Alin Sindrilaru - # ---------------------------------------------------------------------- # ************************************************************************ # * EOS - the CERN Disk Storage System * # * Copyright (C) 2011 CERN/Switzerland * # * * # * This program is free software: you can redistribute it and/or modify * # * it under the terms of the GNU General Public License as published by * # * the Free Software Foundation, either version 3 of the License, or * # * (at your option) any later version. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU General Public License for more details. * # * * # * You should have received a copy of the GNU General Public License * # * along with this program. 
If not, see .* # ************************************************************************ if(PYTHONSITEPKG_FOUND) install(PROGRAMS eosarchived.py eosarch_run.py eosarch_reconstruct.py DESTINATION ${CMAKE_INSTALL_FULL_BINDIR} PERMISSIONS OWNER_READ OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) install(DIRECTORY eosarch DESTINATION ${PYTHONSITEPKG_PATH} PATTERN "tests" EXCLUDE PATTERN "*~" EXCLUDE PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ) install(FILES opt-eos-xrootd.pth DESTINATION ${PYTHONSITEPKG_PATH} PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ) install(FILES eosarchived.conf DESTINATION ${CMAKE_INSTALL_SYSCONFDIR} PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ) # Installing files depending on service manager (systemd) set(SYSTEMD_DIR /usr/lib/systemd/) if(EXISTS ${SYSTEMD_DIR}) install(FILES eosarchived_env.sysconfig DESTINATION ${CMAKE_INSTALL_SYSCONFDIR}/sysconfig/ RENAME eosarchived_env) install(FILES eosarchived.service DESTINATION ${CMAKE_INSTALL_PREFIX}/lib/systemd/system/) endif() endif() ================================================ FILE: archive/eosarch/__init__.py ================================================ #!/usr/bin/python3 # ------------------------------------------------------------------------------ # File: __init__.py # Author: Elvin-Alin Sindrilaru # ------------------------------------------------------------------------------ # # ****************************************************************************** # EOS - the CERN Disk Storage System # Copyright (C) 2014 CERN/Switzerland # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # ****************************************************************************** """This module provides access to EOS archives. It also facilitates the operations that are done using such objects. """ from eosarch.configuration import Configuration from eosarch.transfer import Transfer from eosarch.processinfo import ProcessInfo from eosarch.exceptions import NoErrorException, CheckEntryException ================================================ FILE: archive/eosarch/archivefile.py ================================================ #!/usr/bin/python3 # ------------------------------------------------------------------------------ # File: archivefile.py # Author: Elvin-Alin Sindrilaru # ------------------------------------------------------------------------------ # # ****************************************************************************** # EOS - the CERN Disk Storage System # Copyright (C) 2014 CERN/Switzerland # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# ****************************************************************************** """ Class modelling an EOS archive file. """ from __future__ import unicode_literals import logging import json from XRootD import client from XRootD.client.flags import QueryCode from eosarch.utils import is_atomic_version_file, seal_path from eosarch.utils import exec_cmd, get_entry_info, set_dir_info from eosarch.exceptions import CheckEntryException class ArchiveFile(object): """ Class modelling an EOS archive file. Attributes: file: File object pointing to local archive file. d2t: True if operation from disk to tape, otherwise False. For backup operations we consider it as a transfer from tape to disk thus it is False. header: Archive header dictionary. """ def __init__(self, path, d2t): """Initialize ArchiveFile object. Args: path (str): Local path to archive file. d2t (bool): True if transfer is to be disk to tape. Raises: IOError: Failed to open local transfer file. """ self.logger = logging.getLogger("transfer") self.d2t = d2t try: self.file = open(path, 'r') except IOError as __: self.logger.error("Failed to open file={0}".format(path)) raise line = self.file.readline() self.header = json.loads(line) self.fseek_dir = self.file.tell() # save start position for dirs pos = self.fseek_dir while line: line = self.file.readline() entry = json.loads(line) if entry[0] == 'f': self.fseek_file = pos # save start position for files break pos = self.file.tell() # Create two XRootD.FileSystem object for source and destination # which are to be reused throughout the transfer. self.fs_src = client.FileSystem(self.header['src']) self.fs_dst = client.FileSystem(self.header['dst']) self.logger.debug("fseek_dir={0}, fseek_file={1}".format(self.fseek_dir, self.fseek_file)) def __del__(self): """Destructor needs to close the file. 
""" try: self.file.close() except ValueError as __: self.logger.warning("File={0} already closed".format(self.file.name)) def dirs(self): """Generator to read directory entries from the archive file. Returns: Return a directory entry from the archive file which looks like this: ['d', "./rel/path/dir", "val1", ,"val2" ... ] """ self.file.seek(self.fseek_dir) line = self.file.readline() while line: dentry = json.loads(line) if dentry[0] == 'd': yield dentry line = self.file.readline() else: break def files(self): """Generator to read file entries from the archive file. Returns: Return a file entry from the archive file which looks like this: ['f', "./rel/path/file", "val1", ,"val2" ... ] """ self.file.seek(self.fseek_file) line = self.file.readline() while line: fentry = json.loads(line) if fentry[0] == 'f': yield fentry line = self.file.readline() else: break def entries(self): """ Generator to read all entries from the archive file. Return: A list representing a file or directory entry. See above for the actual format. """ for dentry in self.dirs(): yield dentry for fentry in self.files(): yield fentry def get_fs(self, url): """ Get XRootD.FileSystem object matching the host in the url. Args: url (string): XRootD endpoint URL. Returns: FileSystem object to be used or None. """ if url.startswith(self.header['src']): return self.fs_src elif url.startswith(self.header['dst']): return self.fs_dst else: return None def get_endpoints(self, rel_path): """Get full source and destination URLs for the given relative path. For this use the information from the header. Take into account whether it is a disk to tape transfer or not. The src in header is always the disk and dst is the tape. Args: rel_path (str): Entry relative path. Returns: Return a tuple of string representing the source and the destination of the transfer. 
""" if rel_path == "./": rel_path = "" src = self.header['src'] + rel_path dst = self.header['dst'] + rel_path if self.header['svc_class']: dst = ''.join([dst, "?svcClass=", self.header['svc_class']]) return (src, dst) if self.d2t else (dst, src) def del_entry(self, rel_path, is_dir, tape_delete): """ Delete file/dir. For directories it is successful only if the dir is empty. For deleting the subtree rooted in a directory one needs to use the del_subtree method. Args: rel_path (str): Entry relative path as stored in the archive file. is_dir (bool): True is entry is dir, otherwise False. tape_delete(bool): If tape_delete is None the delete comes from a PUT or GET operations so we only use the value of self.d2t to decide which entry we will delete. If tape_delete is True we delete tape data, otherwise we purge (delete from disk only). Raises: IOError: Deletion could not be performed. """ src, dst = self.get_endpoints(rel_path) if tape_delete is None: surl = dst # self.d2t is already used inside get_endpoints else: surl = src if tape_delete else dst url = client.URL(surl) fs = self.get_fs(surl) self.logger.debug("Delete entry={0}".format(surl)) if is_dir: st_rm, __ = fs.rmdir((url.path + "?eos.ruid=0&eos.rgid=0")) else: st_rm, __ = fs.rm((url.path + "?eos.ruid=0&eos.rgid=0")) if not st_rm.ok: # Check if entry exists st_stat, __ = fs.stat(url.path) if st_stat.ok: err_msg = "Error removing entry={0}".format(surl) self.logger.error(err_msg) raise IOError() self.logger.warning("Entry={0} already removed".format(surl)) def del_subtree(self, rel_path, tape_delete): """ Delete the subtree rooted at the provided path. Walk through all the files and delete them one by one then proceding with the directories from the deepest one to the root. 
Args: rel_path (string): Relative path to the subtree tape_delete (boolean or None): If present and true this is a deletion otherwise is a purge operation Raises: IOError: Deletion could not be performed """ self.logger.debug("Del subtree for path={0}".format(rel_path)) lst_dirs = [] for fentry in self.files(): path = fentry[1] # Delete only files rooted in current subtree if path.startswith(rel_path): self.del_entry(path, False, tape_delete) for dentry in self.dirs(): path = dentry[1] if rel_path == "./" or path.startswith(rel_path): # Never delete the root path if path != "./": lst_dirs.append(path) # Reverse the list so that we start deleting deepest (empty) dirs first lst_dirs.reverse() for path in lst_dirs: self.del_entry(path, True, tape_delete) def make_mutable(self): """ Make the EOS sub-tree pointed by header['src'] mutable. Raises: IOError when operation fails. """ url = client.URL(self.header['src']) for dentry in self.dirs(): dir_path = url.path + dentry[1] fgetattr = ''.join([url.protocol, "://", url.hostid, "//proc/user/", "?mgm.cmd=attr&mgm.subcmd=get&mgm.attr.key=sys.acl", "&mgm.path=", seal_path(dir_path)]) (status, stdout, __) = exec_cmd(fgetattr) if not status: warn_msg = "No xattr sys.acl found for dir={0}".format(dir_path) self.logger.warning(warn_msg) else: # Remove the 'z:i' rule from the acl list stdout = stdout.replace('"', '') acl_val = stdout[stdout.find('=') + 1:] rules = acl_val.split(',') new_rules = [] for rule in rules: if rule.startswith("z:"): tag, definition = rule.split(':') pos = definition.find('i') if pos != -1: definition = definition[:pos] + definition[pos + 1:] if definition: new_rules.append(':'.join([tag, definition])) continue new_rules.append(rule) acl_val = ','.join(new_rules) self.logger.info("new acl: {0}".format(acl_val)) if acl_val: # Set the new sys.acl xattr fmutable = ''.join([url.protocol, "://", url.hostid, "//proc/user/?", "mgm.cmd=attr&mgm.subcmd=set&mgm.attr.key=sys.acl", "&mgm.attr.value=", acl_val, 
"&mgm.path=", dir_path]) (status, __, stderr) = exec_cmd(fmutable) if not status: err_msg = "Error making dir={0} mutable, msg={1}".format( dir_path, stderr) self.logger.error(err_msg) raise IOError(err_msg) else: # sys.acl empty, remove it from the xattrs frmattr = ''.join([url.protocol, "://", url.hostid, "//proc/user/?", "mgm.cmd=attr&mgm.subcmd=rm&mgm.attr.key=sys.acl", "&mgm.path=", dir_path]) (status, __, stderr) = exec_cmd(frmattr) if not status: err_msg = ("Error removing xattr=sys.acl for dir={0}, msg={1}" "").format(dir_path, stderr) self.logger.error(err_msg) raise IOError(err_msg) def check_root_dir(self): """ Do the necessary checks for the destination directory depending on the type of the transfer. Raises: IOError: Root dir state inconsistent. """ root_str = self.header['dst' if self.d2t else 'src'] fs = self.get_fs(root_str) url = client.URL(root_str) arg = url.path + "?eos.ruid=0&eos.rgid=0" st, __ = fs.stat(arg) if self.d2t: if st.ok: # For PUT destination dir must NOT exist err_msg = "Root PUT dir={0} exists".format(root_str) self.logger.error(err_msg) raise IOError(err_msg) else: # Make sure the rest of the path exists as for the moment CASTOR # mkdir -p /path/to/file does not work properly pos = url.path.find('/', 1) while pos != -1: dpath = url.path[: pos] pos = url.path.find('/', pos + 1) st, __ = fs.stat(dpath) if not st.ok: st, __ = fs.mkdir(dpath) if not st.ok: err_msg = ("Dir={0} failed mkdir errmsg={1}" "").format(dpath, st.message) self.logger.error(err_msg) raise IOError(err_msg) elif not self.d2t: # For GET destination must exist and contain just the archive file if not st.ok: err_msg = "Root GET dir={0} does NOT exist".format(root_str) self.logger.error(err_msg) raise IOError(err_msg) else: ffindcount = ''.join([url.protocol, "://", url.hostid, "//proc/user/?mgm.cmd=find&mgm.path=", seal_path(url.path), "&mgm.option=Z"]) (status, stdout, stderr) = exec_cmd(ffindcount) if status: for entry in stdout.split(): tag, num = 
entry.split('=') if ((tag == 'nfiles' and num not in ['1', '2']) or (tag == 'ndirectories' and num != '1')): err_msg = ("Root GET dir={0} should contain at least " "one file and at most two - clean up and " "try again").format(root_str) self.logger.error(err_msg) raise IOError(err_msg) else: err_msg = ("Error doing find count on GET destination={0}" ", msg={1}").format(root_str, stderr) self.logger.error(err_msg) raise IOError(err_msg) def verify(self, best_effort, tx_check_only=False): """ Check the integrity of the archive either on disk or on tape. Args: best_effort (boolean): If True then try to verify all entries even if we get an error during the check. This is used for the backup while for the archive, we return as soon as we find the first error. tx_check_only (boolean): If True then only check the existence of the entry, the size and checksum value. This is done only for archive GET operations. Returns: (status, lst_failed) - Status is True if archive is valid, otherwise false. In case the archive has errors return also the first corrupted entry from the archive file, otherwise return an empty list. For BACKUP operations return the status and the list of entries for which the verfication failed in order to provide a summary to the user. """ self.logger.info("Do transfer verification") status = True lst_failed = [] for entry in self.entries(): try: self._verify_entry(entry, tx_check_only) except CheckEntryException as __: lst_failed.append(entry) status = False if best_effort: continue else: break return (status, lst_failed) def _verify_entry(self, entry, tx_check_only): """ Check that the entry (file/dir) has the proper meta data. Args: entry (list): Entry from the arhive file containing all info about this particular file/directory. tx_check_only (boolean): If True then for files only check their existence, size and checksum values. Raises: CheckEntryException: if entry verification fails. 
""" self.logger.debug("Verify entry={0}".format(entry)) is_dir, path = (entry[0] == 'd'), entry[1] __, dst = self.get_endpoints(path) url = client.URL(dst) if self.d2t: # for PUT check entry size and checksum if possible fs = self.get_fs(dst) st, stat_info = fs.stat(url.path) if not st.ok: err_msg = "Entry={0} failed stat".format(dst) self.logger.error(err_msg) raise CheckEntryException("failed stat") if not is_dir: # check file size match indx = self.header["file_meta"].index("size") + 2 orig_size = int(entry[indx]) if stat_info.size != orig_size: err_msg = ("Verify entry={0}, expect_size={1}, size={2}" "").format(dst, orig_size, stat_info.size) self.logger.error(err_msg) raise CheckEntryException("failed file size match") # Check checksum only if it is adler32 - only one supported by CASTOR indx = self.header["file_meta"].index("xstype") + 2 # !!!HACK!!! Check the checksum only if file size is not 0 since # CASTOR does not store any checksum for 0 size files if stat_info.size != 0 and entry[indx] == "adler": indx = self.header["file_meta"].index("xs") + 2 xs = entry[indx] st, xs_resp = fs.query(QueryCode.CHECKSUM, url.path) if not st.ok: err_msg = "Entry={0} failed xs query".format(dst) self.logger.error(err_msg) raise CheckEntryException("failed xs query") # Result has an annoying \x00 character at the end and it # contains the xs type (adler32) and the xs value resp = xs_resp.split(b'\x00')[0].split() # If checksum value is not 8 char long then we need padding if len(resp[1]) != 8: resp[1] = "{0:0>8}".format(resp[1]) if resp[0] == "adler32" and resp[1] != xs: err_msg = ("Entry={0} xs value missmatch xs_expected={1} " "xs_got={2}").format(dst, xs, resp[1]) self.logger.error(err_msg) raise CheckEntryException("xs value missmatch") else: # for GET check all metadata if is_dir: tags = self.header['dir_meta'] else: tags = self.header['file_meta'] try: if self.header['twindow_type'] and self.header['twindow_val']: dfile = dict(zip(tags, entry[2:])) twindow_sec = 
int(self.header['twindow_val']) tentry_sec = int(float(dfile[self.header['twindow_type']])) if tentry_sec < twindow_sec: # No check for this entry return # This is a backup so don't check atomic version files if is_atomic_version_file(entry[1]): return except KeyError as __: # This is not a backup transfer but an archive one, carry on pass try: meta_info = get_entry_info(url, path, tags, is_dir) except (AttributeError, IOError, KeyError) as __: self.logger.error("Failed getting metainfo entry={0}".format(dst)) raise CheckEntryException("failed getting metainfo") # Check if we have any excluded xattrs try: excl_xattr = self.header['excl_xattr'] except KeyError as __: excl_xattr = list() if is_dir and excl_xattr: # For directories and configurations containing excluded xattrs # we refine the checks. If "*" in excl_xattr then no check is done. if "*" not in excl_xattr: ref_dict = dict(zip(tags, entry[2:])) new_dict = dict(zip(tags, meta_info[2:])) for key, val in ref_dict.items(): if not isinstance(val, dict): if new_dict[key] != val: err_msg = ("Verify failed for entry={0} expect={1} got={2}" " at key={3}").format(dst, entry, meta_info, key) self.logger.error(err_msg) raise CheckEntryException("failed metainfo match") else: for kxattr, vxattr in val.items(): if kxattr not in excl_xattr: if vxattr != new_dict[key][kxattr]: err_msg = ("Verify failed for entry={0} expect={1} got={2}" " at xattr key={3}").format(dst, entry, meta_info, kxattr) self.logger.error(err_msg) raise CheckEntryException("failed metainfo match") else: # For files with tx_check_only verification, we refine the checks if tx_check_only and not is_dir: idx_size = self.header["file_meta"].index("size") + 2 idx_xstype = self.header["file_meta"].index("xstype") + 2 idx_xsval = self.header["file_meta"].index("xs") + 2 if (meta_info[idx_size] != entry[idx_size] or meta_info[idx_xstype] != entry[idx_xstype] or meta_info[idx_xsval] != entry[idx_xsval]): err_msg = ("Partial verify failed for entry={0} 
expect={1} got={2}" "").format(dst, entry, meta_info) self.logger.error(err_msg) raise CheckEntryException("failed metainfo partial match") else: if is_dir: # Compensate for the removal fo the S_ISGID bit mask_mode = int("02000", base=8) val_mode = int(entry[4], base=8) val_mode |= mask_mode compat_entry = list(entry) compat_entry[4] = "{0:o}".format(val_mode) else: compat_entry = list(entry) if not meta_info == entry and not compat_entry == entry: err_msg = ("Verify failed for entry={0} expect={1} got={2}" "").format(dst, entry, meta_info) self.logger.error(err_msg) raise CheckEntryException("failed metainfo match") self.logger.info("Entry={0}, status={1}".format(dst, True)) def mkdir(self, dentry): """ Create directory and optionally for GET operations set the metadata information. Args: dentry (list): Directory entry as read from the archive file. Raises: IOError: Directory creation failed. """ __, surl = self.get_endpoints(dentry[1]) fs = self.get_fs(surl) url = client.URL(surl) # Create directory if not already existing st, __ = fs.stat((url.path + "?eos.ruid=0&eos.rgid=0")) if not st.ok: if not self.d2t: st, __ = fs.mkdir((url.path + "?eos.ruid=0&eos.rgid=0")) else: st, __ = fs.mkdir((url.path)) if not st.ok: err_msg = ("Dir={0} failed mkdir errmsg={1}, errno={2}, code={3}" "").format(surl, st.message, st.errno, st.code) self.logger.error(err_msg) raise IOError(err_msg) # For GET operations set also the metadata if not self.d2t: dict_dinfo = dict(zip(self.header['dir_meta'], dentry[2:])) # Get the list of excluded extended attributes if it exists try: excl_xattr = self.header['excl_xattr'] except KeyError as __: excl_xattr = list() try: set_dir_info(surl, dict_dinfo, excl_xattr) except IOError as __: err_msg = "Dir={0} failed setting metadata".format(surl) self.logger.error(err_msg) raise IOError(err_msg) ================================================ FILE: archive/eosarch/asynchandler.py ================================================ #!/usr/bin/python3 # 
------------------------------------------------------------------------------ # File: asynchandler.py # Author: Elvin-Alin Sindrilaru # ------------------------------------------------------------------------------ # # ****************************************************************************** # EOS - the CERN Disk Storage System # Copyright (C) 2014 CERN/Switzerland # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # ****************************************************************************** """ Objects used for handling asynchronous XRootD requests. """ import logging from threading import Condition class _MkDirHandler(object): """ Async mkdir handler which reports to MetaHandler. Attributes: path (string): Directory path for which the handler is created. meta_handler (MetaHandler): Meta handler object. """ def __init__(self, path, meta_handler): self.type = 'mkdir' self.path = path self.meta_handler = meta_handler def __call__(self, status, response, hostlist): self.meta_handler.handle(self.type, status, self.path) class _PrepareHandler(object): """ Async prepare handler which reports to MetaHandler. Attributes: path (string): Directory path for which the handler is created. meta_handler (MetaHandler): Meta handler object. 
""" def __init__(self, path, meta_handler): self.type = 'prepare' self.path = path self.meta_handler = meta_handler def __call__(self, status, response, hostlist): self.meta_handler.handle(self.type, status, self.path) class _QueryHandler(object): """ Async query handler which reports to MetaHandler. Attributes: path (string): File path for which the handler is created. meta_handler (MetaHandler): Meta handler object. """ def __init__(self, path, meta_handler): self.type = 'query' self.path = path self.meta_handler = meta_handler def __call__(self, status, response, hostlist): self.meta_handler.handle(self.type, status, self.path) class MetaHandler(object): """ Meta handler for different types of async requests. Attributes: cond: Condition variable used for synchronization. logger: Logger object. mkdir_failed: List of directories failed to create. mkdir_status: Status of mkdir requests, logical and between individual mkdir commands. mkdir_num: Number of mkdir commands waiting for reply. """ def __init__(self): list_op = ['mkdir', 'prepare', 'query'] self.num, self.status, self.err_msg, self.failed = {}, {}, {}, {} self.handlers = {'mkdir': _MkDirHandler, 'prepare': _PrepareHandler, 'query': _QueryHandler} for op in list_op: self.num[op] = 0 self.status[op] = True self.err_msg[op] = "" self.failed[op] = [] self.cond = Condition() self.logger = logging.getLogger("transfer") def register(self, op, path): """ Register handler for operation. """ self.cond.acquire() self.num[op] += 1 self.cond.release() return self.handlers[op](path, self) def handle(self, op, status, path): """Handle incoming response. """ self.cond.acquire() self.status[op] = self.status[op] and status.ok self.num[op] -= 1 if not status.ok: self.failed[op].append(path) self.err_msg[op] = status.message if self.num[op] == 0: self.cond.notifyAll() self.cond.release() def wait(self, op): """Wait for all responses to arrive. 
""" self.cond.acquire() while self.num[op] != 0: self.cond.wait() if self.failed[op]: self.logger.error(("List of failed {0} paths is: {1}, err_msg= {2}" "").format(op, self.failed[op], self.err_msg[op])) else: self.logger.debug("All {0} requests were successful".format(op)) self.cond.release() return self.status[op] ================================================ FILE: archive/eosarch/configuration.py ================================================ # ------------------------------------------------------------------------------ # File: configuration.py # Author: Elvin-Alin Sindrilaru # ------------------------------------------------------------------------------ # # ****************************************************************************** # EOS - the CERN Disk Storage System # Copyright (C) 2014 CERN/Switzerland # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # ****************************************************************************** """ Class holding information about the configuration parameters used by both the eosarchived daemon and also each individual transfer process. """ from __future__ import unicode_literals from __future__ import print_function import os import sys import logging import logging.handlers class Configuration(object): """ Configuration class for the archiving daemon and the transfer processes. 
""" def __init__(self): """ Initialize the configuration by reading in all the parameters from the configuration file supplied. First of all, get any environment variables and setup constants based on them. Args: fn_conf (string): Path to the configuration file, which in normal conditions should be /etc/eosarchived.conf """ try: LOG_DIR = os.environ["LOG_DIR"] except KeyError as __: print("LOG_DIR env. not found", file=sys.stderr) raise try: self.__dict__['EOS_ARCHIVE_DIR'] = os.environ["EOS_ARCHIVE_DIR"] except KeyError as __: print("EOS_ARCHIVE_DIR env. not found", file=sys.stderr) raise try: archive_conf = os.environ["EOS_ARCHIVE_CONF"] except KeyError as __: print("EOS_ARCHIVE_CONF env. not found using /etc/eosarchived.conf", file=sys.stderr) archive_conf = "/etc/eosarchived.conf" log_dict = {"debug": logging.DEBUG, "notice": logging.INFO, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "crit": logging.CRITICAL, "alert": logging.CRITICAL} self.__dict__['FRONTEND_IPC'] = ''.join([self.__dict__['EOS_ARCHIVE_DIR'], "archive_frontend.ipc"]) self.__dict__['BACKEND_REQ_IPC'] = ''.join([self.__dict__['EOS_ARCHIVE_DIR'], "archive_backend_req.ipc"]) self.__dict__['BACKEND_PUB_IPC'] = ''.join([self.__dict__['EOS_ARCHIVE_DIR'], "archive_backend_pub.ipc"]) self.__dict__['LOG_FILE'] = LOG_DIR + "eosarchived.log" self.__dict__['CREATE_OP'] = 'create' self.__dict__['GET_OP'] = 'get' self.__dict__['PUT_OP'] = 'put' self.__dict__['TX_OP'] = 'transfers' self.__dict__['PURGE_OP'] = 'purge' self.__dict__['DELETE_OP'] = 'delete' self.__dict__['KILL_OP'] = 'kill' self.__dict__['BACKUP_OP'] = 'backup' self.__dict__["STATS"] = 'stats' self.__dict__['OPT_RETRY'] = 'retry' self.__dict__['OPT_FORCE'] = 'force' self.__dict__['ARCH_FN'] = ".archive" self.__dict__['ARCH_INIT'] = ".archive.init" self.__dict__['ARCHIVE_MAX_TIMEOUT'] = '86400' try: with open(archive_conf, 'r') as f: for line in f: line = line.strip('\0\n ') if len(line) and line[0] != '#': tokens = 
line.split('=', 1) # Try to convert to int by default try: self.__dict__[tokens[0]] = int(tokens[1]) except ValueError as __: if tokens[0] == 'LOG_LEVEL': self.__dict__[tokens[0]] = log_dict[tokens[1]] else: self.__dict__[tokens[0]] = tokens[1] except IOError as __: print("Unable to open config file: {0}".format(archive_conf), file=sys.stderr) raise # If no loglevel is set use INFO try: self.__dict__['LOG_LEVEL'] except KeyError as __: self.__dict__['LOG_LEVEL'] = logging.INFO # Mapping between operation type and store path for transfer and log files self.__dict__['DIR'] = {} self.logger, self.handler = None, None def start_logging(self, logger_name, log_file, timed_rotating = False): """ Configure the logging Args: logger_name (string): Name of the logger timed_rotating (boolean): If True is a TimedRotatingFileHandler """ log_format = ('%(asctime)-15s %(name)s[%(process)d] %(filename)s:' '%(lineno)d LVL=%(levelname)s %(message)s') logging.basicConfig(level=self.__dict__['LOG_LEVEL'], format=log_format) self.__dict__['LOGGER_NAME'] = logger_name self.__dict__['LOG_FILE'] = log_file self.logger = logging.getLogger(self.__dict__['LOGGER_NAME']) formatter = logging.Formatter(log_format) permissions = 0o644; if timed_rotating: self.handler = logging.handlers.TimedRotatingFileHandler( self.__dict__['LOG_FILE'], 'midnight', encoding="utf-8") else: self.handler = logging.FileHandler(self.__dict__['LOG_FILE'], encoding="utf-8") try: os.chmod(self.__dict__['LOG_FILE'], permissions) except OSError as ex: # If we don't have access to change the permissions, we need to # rely on the initial file creator having done the chmod pass self.handler.setFormatter(formatter) self.logger.addHandler(self.handler) self.logger.propagate = False def display(self): """ Print configuration either to the log file or stderr """ try: self.logger.info("Configuration parameters:") for key, val in self.__dict__.items(): if key.isupper(): self.logger.info("conf.{0} = {1}".format(key, val)) except 
AttributeError as __: print("Configuration parameters:", file=sys.stderr) for key, val in self.__dict__.items(): if key.isupper(): print("conf.{0} = {1}".format(key, val), file=sys.stderr) def __setattr__(self, name, value): """ Set object attribute Args: name (string): Attribute name value (string): Attribute value """ self.__dict__[name] = value ================================================ FILE: archive/eosarch/exceptions.py ================================================ # ------------------------------------------------------------------------------ # File: exceptions.py # Author: Elvin-Alin Sindrilaru # ------------------------------------------------------------------------------ # # ****************************************************************************** # EOS - the CERN Disk Storage System # Copyright (C) 2014 CERN/Switzerland # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # ****************************************************************************** """ Module containing user defined exceptions.""" class NoErrorException(Exception): """ Exception raised in case we were requested to retry an operation but after the initial check there were no errors found. """ pass class CheckEntryException(Exception): """ Exception raised in cache a verify entry operation failes. 
""" pass class NotOnTapeException(Exception): """ Exception raised when a file is not on tape after the maximum configured timeout per entry """ ================================================ FILE: archive/eosarch/processinfo.py ================================================ # ------------------------------------------------------------------------------ # File: processinfo.py # Author: Elvin-Alin Sindrilaru # ------------------------------------------------------------------------------ # # ****************************************************************************** # EOS - the CERN Disk Storage System # Copyright (C) 2014 CERN/Switzerland # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # ****************************************************************************** """ Class emulating the process information for an archive/backup transfer which is used by the eosarchived daemon to display the status of the ongoing transfers. """ import time import logging from os import kill from hashlib import sha256 class ProcessInfo(object): """ Class containing information about a process. It can also hold information about an orphan process and in this case the self.proc is None. 
Attributes: proc (Process): Process object uid (int): UID of transfer owner gid (int): GID of transfer owner uuid (string): uuid of transfer pid (int): PID of process executing the transfer root_dir (string): Root directory in EOS of the archive/backup op (string): Operation type orig_req (JSON): JSON object representing the original request """ def __init__(self, req_json = None): """ Initializing the process info object Args: req_json (JSON): Json object containing the following information: cmd: type of operation src: EOS url to the archive/backup file uid: UID of the user triggering the archiving gid: GID of the user triggering the archiving """ self.logger = logging.getLogger("dispatcher") self.proc = None self.orig_req = req_json; if req_json: # Normal, 'owned' process self.uid = int(req_json['uid']) self.gid = int(req_json['gid']) self.status = "pending" self.pid, self.op = 0, req_json['cmd'] # Extract the archive/backup root directory path src = req_json['src'] pos = src.find("//", src.find("//") + 1) + 1 self.root_dir = src[pos : src.rfind('/') + 1] self.uuid = sha256(self.root_dir.encode()).hexdigest() self.timestamp = time.time() def update(self, dict_info): """ Update process information Args: dict_info (dict): Dictionary containing the following information about an orphan process: uuid, pid, root_dir, op, status, uid, gid. If this is not the orphan discovery step then we only have the status field. """ self.status = dict_info['status'] try: # Update for orphan processes if information present self.uuid = dict_info['uuid'] self.root_dir = dict_info['root_dir'] self.op = dict_info['op'] self.status = dict_info['status'] self.pid = int(dict_info['pid']) self.uid = int(dict_info['uid']) self.gid = int(dict_info['gid']) self.timestamp = float(dict_info['timestamp']) except KeyError as __: # This response is only a status update pass def is_alive(self): """ Check if the underlying process is alive. For processes started by the current dispatcher i.e. 
for which we hold a reference to the Process object we can use is_alive() method, for orphan processes we use the OS functionality and send it a signal to check if it is still running. Returns: True if process alive, false otherwise """ if self.proc: ret = self.proc.poll() if ret != None: info_msg = ("Uuid={0}, pid={1}, op={2}, path={3} has terminated " "returncode={4}").format(self.uuid, self.pid, self.op, self.root_dir, ret) self.logger.info(info_msg) return False else: try: kill(self.pid, 0) except OSError as __: dbg_msg = ("Uuid={0}, pid={1}, op={2}, path={3} has terminated - " "no returncode available").format(self.uuid, self.pid, self.op, self.root_dir) self.logger.debug(dbg_msg) return False return True ================================================ FILE: archive/eosarch/tests/__init__.py ================================================ # ------------------------------------------------------------------------------ # File: __init__.py # Author: Elvin-Alin Sindrilaru # ------------------------------------------------------------------------------ # # ****************************************************************************** # EOS - the CERN Disk Storage System # Copyright (C) 2014 CERN/Switzerland # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# ****************************************************************************** ================================================ FILE: archive/eosarch/tests/env.py ================================================ # ------------------------------------------------------------------------------ # File: env.py # Author: Elvin-Alin Sindrilaru # ------------------------------------------------------------------------------ # # ****************************************************************************** # EOS - the CERN Disk Storage System # Copyright (C) 2014 CERN/Switzerland # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# ******************************************************************************
SERVER_URL = "root://localhost/"
EOS_DIR = "/eos/dev/test/"
LOCAL_FILE = "test_file.dat"


# ================================================
# FILE: archive/eosarch/tests/test_archivefile.py
# ================================================
# ------------------------------------------------------------------------------
# File: test_archivefile.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# ******************************************************************************
import os
import unittest
import json
# NOTE(review): the package is "eosarch" (see transfer.py imports); the old
# "archivefile.*" paths do not exist in this tree
from eosarch.utils import exec_cmd
from eosarch.archivefile import ArchiveFile
from XRootD import client
from env import *


def test_exec_cmd():
    """Check the exec command. List directory extended attributes from EOS
    local instance.
    """
    url = client.URL(''.join([SERVER_URL, EOS_DIR]))
    flsattr = ''.join([url.protocol, "://", url.hostid, "//proc/user/",
                       "?mgm.cmd=attr&mgm.subcmd=ls&mgm.path=", EOS_DIR])
    (status, stdout, __) = exec_cmd(flsattr)
    assert(status)


class TestArchiveFile(unittest.TestCase):
    """ Unittest class for ArchiveFile."""

    def setUp(self):
        """ SetUp function."""
        self.local_path = os.getcwd() + '/' + LOCAL_FILE
        self.d2t = True
        self.arch = ArchiveFile(self.local_path, self.d2t)

    def tearDown(self):
        """TearDown function."""
        pass

    def test_list_dirs(self):
        """Check generator dir listing.
        """
        with open(self.local_path, 'r') as farch:
            _ = farch.readline()  # skip the header

            for dentry in self.arch.dirs():
                for line in farch:
                    entry = json.loads(line)

                    if entry[0] == 'd':
                        self.assertEqual(entry, dentry)
                        break

    def test_list_files(self):
        """Check generator file listing.
        """
        with open(self.local_path, 'r') as farch:
            _ = farch.readline()  # skip the header

            for fentry in self.arch.files():
                for line in farch:
                    entry = json.loads(line)

                    if entry[0] == 'f':
                        self.assertEqual(entry, fentry)
                        break

    def test_list_entries(self):
        """Check generator of all entries.
        """
        with open(self.local_path, 'r') as farch:
            _ = farch.readline()  # skip the header

            for aentry in self.arch.entries():
                for line in farch:
                    entry = json.loads(line)
                    self.assertEqual(entry, aentry)
                    break

    def test_get_endpoints(self):
        """Check endpoints based on transfer type.
        """
        for aentry in self.arch.entries():
            src, dst = self.arch.get_endpoints(aentry[1])
            self.assertTrue(src.find(self.arch.header['src']) == 0)
            self.assertTrue(dst.find(self.arch.header['dst']) == 0)

        self.d2t = False  # test tape to disk
        self.arch = ArchiveFile(self.local_path, self.d2t)

        for aentry in self.arch.entries():
            src, dst = self.arch.get_endpoints(aentry[1])
            self.assertTrue(src.find(self.arch.header['dst']) == 0)
            self.assertTrue(dst.find(self.arch.header['src']) == 0)


# ================================================
# FILE: archive/eosarch/transfer.py
# ================================================
# ------------------------------------------------------------------------------
# File: tranfer.py
# Author: Elvin-Alin Sindrilaru
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# ******************************************************************************
""" from __future__ import unicode_literals from __future__ import division import os import time import logging import threading import zmq import ast from os.path import join from time import sleep from random import randrange from hashlib import sha256 from XRootD import client from XRootD.client.flags import PrepareFlags, QueryCode, OpenFlags, StatInfoFlags from eosarch.archivefile import ArchiveFile from eosarch.utils import exec_cmd, is_version_file from eosarch.asynchandler import MetaHandler from eosarch.exceptions import NoErrorException, NotOnTapeException class ThreadJob(threading.Thread): """ Job executing a client.CopyProcess in a separate thread. This makes sense since a third-party copy job is mostly waiting for the completion of the job not doing any other operations and therefore not using the GIL too much. Attributes: status (bool): Final status of the job lst_jobs (list): List of jobs to be executed proc (client.CopyProcess): Copy process which is being executed retires (int): Number of times this job was retried """ def __init__(self, jobs, retry=0): """Constructor Args: jobs (list): List of transfers to be executed retry (int): Number of times this job was retried """ threading.Thread.__init__(self) self.retries = retry self.xrd_status = None self.lst_jobs = list(jobs) def run(self): """ Run method """ self.retries += 1 proc = client.CopyProcess() for job in self.lst_jobs: # If file is 0-size then we do a normal copy, otherwise we enforce # a TPC transfer tpc_flag = "none" if (int(job[2]) != 0): tpc_flag = "only" # TODO: use the parallel mode starting with XRootD 4.1 proc.add_job(job[0], job[1], force=True, thirdparty=tpc_flag, tpctimeout=3600) self.xrd_status = proc.prepare() if self.xrd_status.ok: self.xrd_status, __ = proc.run() class ThreadStatus(threading.Thread): """ Thread responsible for replying to any requests comming from the dispatcher process. 
""" def __init__(self, transfer): """ Constructor Args: transfer (Transfer): Current transfer object """ threading.Thread.__init__(self) # TODO: drop the logger as it may interfere with the main thread self.logger = logging.getLogger("transfer") self.transfer = transfer self.run_status = True self.lock = threading.Lock() def run(self): """ Run method """ self.logger.info("Starting the status thread") ctx = zmq.Context() socket_rr = ctx.socket(zmq.DEALER) socket_rr.connect("ipc://" + self.transfer.config.BACKEND_REQ_IPC) socket_ps = ctx.socket(zmq.SUB) mgr_filter = b"[MASTER]" addr = "ipc://" + self.transfer.config.BACKEND_PUB_IPC socket_ps.connect(addr) socket_ps.setsockopt(zmq.SUBSCRIBE, mgr_filter) while self.keep_running(): if socket_ps.poll(5000): try: [__, msg] = socket_ps.recv_multipart() except zmq.ZMQError as err: if err.errno == zmq.ETERM: self.logger.error("ETERM error") break # shutting down, exit else: self.logger.exception(err) continue except Exception as err: self.logger.exception(err) self.logger.debug("RECV_MSG: {0}".format(msg)) dict_cmd = ast.literal_eval(msg.decode()) if dict_cmd['cmd'] == 'orphan_status': self.logger.info("Reconnect to master ... 
") resp = ("{{'uuid': '{0}', " "'pid': '{1}', " "'uid': '{2}'," "'gid': '{3}'," "'root_dir': '{4}', " "'op': '{5}'," "'status': '{6}', " "'timestamp': '{7}'" "}}").format(self.transfer.uuid, self.transfer.pid, self.transfer.uid, self.transfer.gid, self.transfer.root_dir, self.transfer.oper, self.transfer.get_status(), self.transfer.timestamp) elif dict_cmd['cmd'] == 'status': resp = ("{{'uuid': '{0}', " "'status': '{1}'" "}}").format(self.transfer.uuid, self.transfer.get_status()) else: self.logger.error("Unknown command: {0}".format(dict_cmd)) continue self.logger.info("Sending response: {0}".format(resp)) socket_rr.send_multipart([resp.encode()], zmq.NOBLOCK) def do_finish(self): """ Set the flag for the status thread to finish execution """ self.lock.acquire() self.run_status = False self.lock.release() def keep_running(self): """ Check if we continue running - the transfer is ongoing Returns: True if status thread should keep running, otherwise False """ self.lock.acquire() ret = self.run_status self.lock.release() return ret class Transfer(object): """ Trasfer archive object Attributes: req_json (JSON): Command received from the EOS MGM. 
            Needs to contains the following entries: cmd, src, opt, uid, gid
        threads (list): List of threads doing partial transfers(CopyProcess jobs)
    """
    def __init__(self, req_json, config):
        # Operation type: put/get/purge/delete/backup (config constants)
        self.config = config
        self.oper = req_json['cmd']
        self.uid, self.gid = req_json['uid'], req_json['gid']
        self.do_retry = (req_json['opt'] == self.config.OPT_RETRY)
        self.force = (req_json['opt'] == self.config.OPT_FORCE)
        self.efile_full = req_json['src']
        # efile_root = URL up to and including the last '/'; root_dir = the
        # plain namespace path extracted after the '//' authority separator
        self.efile_root = self.efile_full[:-(len(self.efile_full) - self.efile_full.rfind('/') - 1)]
        self.root_dir = self.efile_root[self.efile_root.rfind('//') + 1:]
        # Transfer uuid is derived deterministically from the root directory
        self.uuid = sha256(self.root_dir.encode()).hexdigest()
        local_file = join(self.config.DIR[self.oper], self.uuid)
        self.tx_file = local_file + ".tx"
        self.list_jobs, self.threads = [], []
        self.pid = os.getpid()
        self.archive = None
        # Special case for initial PUT as we need to copy also the archive file
        self.init_put = self.efile_full.endswith(self.config.ARCH_INIT)
        self.status = "initializing"
        self.lock_status = threading.Lock()
        self.timestamp = time.time()
        self.logger = logging.getLogger("transfer")
        self.thread_status = ThreadStatus(self)

    def get_status(self):
        """ Get current status (thread-safe)

        Returns:
            String representing the status
        """
        self.lock_status.acquire()
        ret = self.status
        self.lock_status.release()
        return ret

    def set_status(self, msg):
        """ Set current status (thread-safe)

        Args:
            msg (string): New status
        """
        self.lock_status.acquire()
        self.status = msg
        self.lock_status.release()

    def run(self):
        """ Run requested operation - first call prepare

        Raises:
            IOError
        """
        self.thread_status.start()

        if self.oper in [self.config.PUT_OP, self.config.GET_OP]:
            self.archive_prepare()

            if self.do_retry:
                self.do_retry_transfer()
            else:
                try:
                    self.do_transfer()
                except NotOnTapeException as _:
                    # File(s) missing on tape -> fall back to a retry pass
                    self.logger.notice("Doing transfer re-try due to missing file on tape")
                    self.do_retry_transfer()
        elif self.oper in [self.config.PURGE_OP, self.config.DELETE_OP]:
            self.archive_prepare()
            self.do_delete((self.oper == self.config.DELETE_OP))
        elif self.oper == self.config.BACKUP_OP:
            self.backup_prepare()
            self.do_backup()

    def archive_prepare(self):
        """ Prepare requested archive operation.

        Renames the archive file in EOS to the in-progress ".err" name and
        copies it to local disk, then builds the ArchiveFile object.

        Raises:
            IOError: Failed to rename or transfer archive file.
        """
        # Rename archive file in EOS
        efile_url = client.URL(self.efile_full)
        eosf_rename = ''.join([self.efile_root, self.config.ARCH_FN, ".", self.oper, ".err"])
        rename_url = client.URL(eosf_rename)
        frename = ''.join([rename_url.protocol, "://", rename_url.hostid,
                           "//proc/user/?mgm.cmd=file&mgm.subcmd=rename"
                           "&mgm.path=", efile_url.path,
                           "&mgm.file.source=", efile_url.path,
                           "&mgm.file.target=", rename_url.path])
        (status, __, stderr) = exec_cmd(frename)

        if not status:
            err_msg = ("Failed to rename archive file {0} to {1}, msg={2}"
                       "").format(self.efile_full, rename_url, stderr)
            self.logger.error(err_msg)
            raise IOError(err_msg)

        # Copy archive file from EOS to the local disk
        self.efile_full = eosf_rename
        eos_fs = client.FileSystem(self.efile_full)
        st, _ = eos_fs.copy(self.efile_full + "?eos.ruid=0&eos.rgid=0", self.tx_file, True)

        if not st.ok:
            err_msg = ("Failed to copy archive file={0} to local disk at={1}"
                       "").format(self.efile_full, self.tx_file)
            self.logger.error(err_msg)
            raise IOError(err_msg)

        # Create the ArchiveFile object; d2t (disk-to-tape) only for PUT
        d2t = (self.oper == self.config.PUT_OP)
        self.archive = ArchiveFile(self.tx_file, d2t)

    def do_delete(self, tape_delete):
        """ Delete archive either from disk (purge) or from tape (delete)

        Args:
            tape_delete (boolean): If true delete data from tape, otherwise from disk.

        Raises:
            IOError: Failed to delete an entry.
""" del_dirs = [] self.logger.info("Do delete with tape_delete={0}".format(tape_delete)) # Delete also the archive file saved on tape if tape_delete: self.archive.del_entry(self.config.ARCH_INIT, False, tape_delete) # First remove all the files and then the directories for fentry in self.archive.files(): # d2t is false for both purge and deletion self.archive.del_entry(fentry[1], False, tape_delete) for dentry in self.archive.dirs(): # Don't remove the root directory when purging if not tape_delete and dentry[1] == './': continue del_dirs.append(dentry[1]) # Remove the directories from bottom up while len(del_dirs): dpath = del_dirs.pop() self.archive.del_entry(dpath, True, tape_delete) # Remove immutable flag from the EOS sub-tree if tape_delete: self.archive.make_mutable() self.archive_tx_clean(True) def do_transfer(self): """ Execute a put or get operation. Raises: IOError when an IO opperations fails. """ t0 = time.time() indx_dir = 0 # Create directories for dentry in self.archive.dirs(): if dentry[1] == "./": self.archive.check_root_dir() indx_dir += 1 self.archive.mkdir(dentry) msg = "create dir {0}/{1}".format(indx_dir, self.archive.header['num_dirs']) self.set_status(msg) # For GET issue the Prepare2Get for all the files on tape self.prepare2get() # Copy files self.copy_files() # For GET set file ownership and permissions self.update_file_access() # Verify the transferred entries self.set_status("verifying") check_ok, __ = self.archive.verify(False) # For PUT operations wait that all the files are on tape and for GET # send a "prepare evict" request to CTA to clear the disk cache if self.archive.d2t: self.set_status("wait_on_tape") self.wait_on_tape() else: self.set_status("evict_disk_cache") try: self.evict_disk_cache() except OverflowError as __: self.logger.warning("The XRootD Python bindings do not support " "the evict flag yet!") self.set_status("cleaning") self.logger.info("TIMING_transfer={0} sec".format(time.time() - t0)) 
self.archive_tx_clean(check_ok) def do_retry_transfer(self): """ Execute a put or get retry operation. Raises: IOError when an IO opperations fails. """ t0 = time.time() indx_dir = 0 err_entry = None tx_ok, meta_ok = True, True found_checkpoint = False # flag set when reaching recovery entry # Get the first corrupted entry and the type of corruption (tx_ok, meta_ok, lst_failed) = self.check_previous_tx() if not tx_ok or not meta_ok: err_entry = lst_failed[0] # Create directories for dentry in self.archive.dirs(): # Search for the recovery checkpoint if not found_checkpoint: if dentry != err_entry: indx_dir += 1 continue else: found_checkpoint = True indx_dir += 1 self.archive.mkdir(dentry) msg = "create dir {0}/{1}".format(indx_dir, self.archive.header['num_dirs']) self.set_status(msg) if not tx_ok: # For GET issue the Prepare2Get for all the files on tape self.prepare2get(err_entry, found_checkpoint) # Copy files self.copy_files(err_entry, found_checkpoint) # For GET set file ownership and permissions for all entries self.update_file_access(err_entry, found_checkpoint) else: # For GET metadata errors set file ownership and permissions only # for entries after the first corrupted one self.update_file_access() # Verify the transferred entries self.set_status("verifying") check_ok, __ = self.archive.verify(False) # For PUT operations wait that all the files are on tape if self.archive.d2t: self.set_status("wait_on_tape") self.wait_on_tape() else: self.set_status("evict_disk_cache") try: self.evict_disk_cache() except OverflowError as __: self.logger.warning("The XRootD Python bindings do not support " "the evict flag yet!") self.set_status("cleaning") self.logger.info("TIMING_transfer={0} sec".format(time.time() - t0)) self.archive_tx_clean(check_ok) def tx_clean(self, check_ok): """ Clean a backup/archive transfer depending on its type. 
""" if self.oper == self.config.BACKUP_OP: self.backup_tx_clean() else: self.archive_tx_clean(check_ok) def backup_tx_clean(self): """ Clean after a backup transfer by copying the log file in the same directory as the destiantion of the backup. """ # Copy local log file to EOS directory eos_log = ''.join([self.efile_root, ".sys.b#.backup.log?eos.ruid=0&eos.rgid=0"]) self.logger.debug("Copy log:{0} to {1}".format(self.config.LOG_FILE, eos_log)) self.config.handler.flush() cp_client = client.FileSystem(self.efile_full) st, __ = cp_client.copy(self.config.LOG_FILE, eos_log, force=True) if not st.ok: self.logger.error(("Failed to copy log file {0} to EOS at {1}" "").format(self.config.LOG_FILE, eos_log)) else: # Delete log file if it was successfully copied to EOS try: os.remove(self.config.LOG_FILE) except OSError as __: pass # Delete all local files associated with this transfer try: os.remove(self.tx_file) except OSError as __: pass # Join async status thread self.thread_status.do_finish() self.thread_status.join() def archive_tx_clean(self, check_ok): """ Clean the transfer by renaming the archive file in EOS adding the following extensions: .done - the transfer was successful .err - there were errors during the transfer. These are logged in the file .archive.log in the same directory. Args: check_ok (bool): True if no error occured during transfer, otherwise false. 
""" # Rename arch file in EOS to reflect the status if not check_ok: eosf_rename = ''.join([self.efile_root, self.config.ARCH_FN, ".", self.oper, ".err"]) else: eosf_rename = ''.join([self.efile_root, self.config.ARCH_FN, ".", self.oper, ".done"]) old_url = client.URL(self.efile_full) new_url = client.URL(eosf_rename) frename = ''.join([old_url.protocol, "://", old_url.hostid, "//proc/user/?", "mgm.cmd=file&mgm.subcmd=rename&mgm.path=", old_url.path, "&mgm.file.source=", old_url.path, "&mgm.file.target=", new_url.path]) (status, __, stderr) = exec_cmd(frename) if not status: err_msg = ("Failed to rename {0} to {1}, msg={2}" "").format(self.efile_full, eosf_rename, stderr) self.logger.error(err_msg) # TODO: raise IOError else: # For successful delete operations remove also the archive file if self.oper == self.config.DELETE_OP and check_ok: fs = client.FileSystem(self.efile_full) st_rm, __ = fs.rm(new_url.path + "?eos.ruid=0&eos.rgid=0") if not st_rm.ok: warn_msg = "Failed to delete archive {0}".format(new_url.path) self.logger.warning(warn_msg) # Copy local log file back to EOS directory and set the ownership to the # identity of the client who triggered the archive dir_root = self.efile_root[self.efile_root.rfind('//') + 1:] eos_log = ''.join([old_url.protocol, "://", old_url.hostid, "/", dir_root, self.config.ARCH_FN, ".log?eos.ruid=0&eos.rgid=0"]) self.logger.debug("Copy log:{0} to {1}".format(self.config.LOG_FILE, eos_log)) self.config.handler.flush() cp_client = client.FileSystem(self.efile_full) st, __ = cp_client.copy(self.config.LOG_FILE, eos_log, force=True) if not st.ok: self.logger.error(("Failed to copy log file {0} to EOS at {1}" "").format(self.config.LOG_FILE, eos_log)) else: # User triggering archive operation owns the log file eos_log_url = client.URL(eos_log) fs = client.FileSystem(eos_log) arg = ''.join([eos_log_url.path, "?eos.ruid=0&eos.rgid=0&mgm.pcmd=chown&uid=", self.uid, "&gid=", self.gid]) xrd_st, __ = fs.query(QueryCode.OPAQUEFILE, arg) 
            if not xrd_st.ok:
                err_msg = ("Failed setting ownership of the log file in"
                           " EOS: {0}").format(eos_log)
                self.logger.error(err_msg)
                raise IOError(err_msg)
            else:
                # Delete log if successfully copied to EOS and changed ownership
                try:
                    os.remove(self.config.LOG_FILE)
                except OSError as __:
                    pass

        # Delete all local files associated with this transfer (best-effort)
        try:
            os.remove(self.tx_file)
        except OSError as __:
            pass

        # Join async status thread
        self.thread_status.do_finish()
        self.thread_status.join()

    def copy_files(self, err_entry=None, found_checkpoint=False):
        """ Copy files.

        Note that when doing PUT the layout is not conserved. Therefore, a file
        with 3 replicas will end up as just a simple file in the new location.

        Args:
            err_entry (list): Entry record from the archive file corresponding
                to the first file/dir that was corrupted.
            found_checkpoint (boolean): If True it means the checkpoint was
                already found and we don't need to search for it.

        Raises:
            IOError: Copy request failed.
        """
        indx_file = 0

        # For initial PUT copy also the archive file to tape
        if self.init_put:
            # The archive init is already renamed to archive.put.err at this
            # and we need to take this into consideration when transferring it
            url = client.URL(self.efile_full)
            eos_fs = client.FileSystem(self.efile_full)
            st_stat, resp = eos_fs.stat(url.path)

            if st_stat.ok:
                __, dst = self.archive.get_endpoints(self.config.ARCH_INIT)
                self.list_jobs.append((self.efile_full + "?eos.ruid=0&eos.rgid=0"
                                       + "&eos.app=archive", dst, resp.size))
            else:
                err_msg = ''.join(["Failed to get init archive file info, msg=",
                                   st_stat.message])
                self.logger.error(err_msg)
                raise IOError(err_msg)

        # Copy files
        for fentry in self.archive.files():
            # Search for the recovery checkpoint
            if self.do_retry and not found_checkpoint:
                if fentry != err_entry:
                    indx_file += 1
                    continue
                else:
                    found_checkpoint = True

            indx_file += 1
            msg = "copy file {0}/{1}".format(indx_file, self.archive.header['num_files'])
            self.set_status(msg)
            src, dst = self.archive.get_endpoints(fentry[1])
            # Map metadata column names to this entry's values
            dfile = dict(zip(self.archive.header['file_meta'], fentry[2:]))

            # Copy file
            if not self.archive.d2t:
                # For GET we also have the dictionary with the metadata
                dst = ''.join([dst, "?eos.ctime=", dfile['ctime'],
                               "&eos.mtime=", dfile['mtime'],
                               "&eos.bookingsize=", dfile['size'],
                               "&eos.targetsize=", dfile['size'],
                               "&eos.ruid=0&eos.rgid=0&eos.app=archive"])

                # If checksum 0 don't enforce it
                if dfile['xs'] != "0":
                    dst = ''.join([dst, "&eos.checksum=", dfile['xs']])

                # For backup we try to read as root from the source
                if self.oper == self.config.BACKUP_OP:
                    if '?' in src:
                        src = ''.join([src, "&eos.ruid=0&eos.rgid=0&eos.app=archive"])
                    else:
                        src = ''.join([src, "?eos.ruid=0&eos.rgid=0&eos.app=archive"])

                    # If this is a version file we save it as a 2-replica layout
                    if is_version_file(fentry[1]):
                        dst = ''.join([dst, "&eos.layout.checksum=", dfile['xstype'],
                                       "&eos.layout.type=replica&eos.layout.nstripes=2"])

                    # If time window specified then select only the matching entries
                    if (self.archive.header['twindow_type'] and
                            self.archive.header['twindow_val']):
                        twindow_sec = int(self.archive.header['twindow_val'])
                        tentry_sec = int(float(dfile[self.archive.header['twindow_type']]))

                        if tentry_sec < twindow_sec:
                            continue
            else:
                # For PUT read the files from EOS as root
                src = ''.join([src, "?eos.ruid=0&eos.rgid=0&eos.app=archive"])

            self.logger.info("Copying from {0} to {1}".format(src, dst))
            self.list_jobs.append((src, dst, dfile['size']))

            if len(self.list_jobs) >= self.config.BATCH_SIZE:
                st = self.flush_files(False)

                # For archives we fail immediately, for backups it's best-effort
                if not st and self.oper != self.config.BACKUP_OP:
                    err_msg = "Failed to flush files"
                    self.logger.error(err_msg)
                    raise IOError(err_msg)

        # Flush all pending copies and set metadata info for GET operation
        st = self.flush_files(True)

        if not st and self.oper != self.config.BACKUP_OP:
            err_msg = "Failed to flush files"
            self.logger.error(err_msg)
            raise IOError(err_msg)

    def flush_files(self, wait_all):
        """ Flush all pending
        transfers from the list of jobs.

        Args:
            wait_all (bool): If true wait and collect the status from all
                executing threads.

        Returns:
            True if files flushed successfully, otherwise false.
        """
        status = True

        # Wait until a thread from the pool gets freed if we reached the maximum
        # allowed number of running threads
        while len(self.threads) >= self.config.MAX_THREADS:
            remove_indx, retry_threads = [], []

            for indx, thread in enumerate(self.threads):
                thread.join(self.config.JOIN_TIMEOUT)

                # If thread finished get the status and mark it for removal
                if not thread.is_alive():
                    # If failed then attempt a retry
                    if (not thread.xrd_status.ok and
                            thread.retries <= self.config.MAX_RETRIES):
                        self.logger.log(logging.INFO,
                                        ("Thread={0} failed, retries={1}").format
                                        (thread.ident, thread.retries))
                        rthread = ThreadJob(thread.lst_jobs, thread.retries)
                        rthread.start()
                        retry_threads.append(rthread)
                        remove_indx.append(indx)
                        self.logger.log(logging.INFO,("New thread={0} doing a retry").format
                                        (rthread.ident))
                        continue

                    status = status and thread.xrd_status.ok
                    log_level = logging.INFO if thread.xrd_status.ok else logging.ERROR
                    self.logger.log(log_level,("Thread={0} status={1} msg={2}").format
                                    (thread.ident, thread.xrd_status.ok,
                                     thread.xrd_status.message))
                    remove_indx.append(indx)
                    break

            # Remove old/finished threads and add retry ones. For removal we
            # need to start with big indexes first.
            remove_indx.reverse()

            for indx in remove_indx:
                del self.threads[indx]

            self.threads.extend(retry_threads)
            del retry_threads[:]
            del remove_indx[:]

        # If we still have jobs and previous archive jobs were successful or this
        # is a backup operation (best-effort even if we have failed transfers)
        if (self.list_jobs and
            ((self.oper != self.config.BACKUP_OP and status) or
             (self.oper == self.config.BACKUP_OP))):
            thread = ThreadJob(self.list_jobs)
            thread.start()
            self.threads.append(thread)
            del self.list_jobs[:]

        # If a previous archive job failed or we need to wait for all jobs to
        # finish then join the threads and collect their status
        if (self.oper != self.config.BACKUP_OP and not status) or wait_all:
            remove_indx, retry_threads = [], []

            while self.threads:
                for indx, thread in enumerate(self.threads):
                    thread.join()

                    # If failed then attempt a retry
                    if (not thread.xrd_status.ok and
                            thread.retries <= self.config.MAX_RETRIES):
                        self.logger.log(logging.INFO, ("Thread={0} failed, retries={1}").format
                                        (thread.ident, thread.retries))
                        rthread = ThreadJob(thread.lst_jobs, thread.retries)
                        rthread.start()
                        retry_threads.append(rthread)
                        remove_indx.append(indx)
                        self.logger.log(logging.INFO,("New thread={0} doing a retry").format
                                        (rthread.ident))
                        continue

                    status = status and thread.xrd_status.ok
                    log_level = logging.INFO if thread.xrd_status.ok else logging.ERROR
                    self.logger.log(log_level, ("Thread={0} status={1} msg={2}").format
                                    (thread.ident, thread.xrd_status.ok,
                                     thread.xrd_status.message))
                    remove_indx.append(indx)

                # Remove old/finished threads and add retry ones. For removal we
                # need to start with big indexes first.
                remove_indx.reverse()

                for indx in remove_indx:
                    del self.threads[indx]

                self.threads.extend(retry_threads)
                del retry_threads[:]
                del remove_indx[:]

        return status

    def update_file_access(self, err_entry=None, found_checkpoint=False):
        """ Set the ownership and the permissions for the files copied to EOS.
        This is done only for GET operation i.e. self.archive.d2t == False.
        Args:
            err_entry (list): Entry record from the archive file corresponding
                to the first file/dir that was corrupted.
            found_checkpoint (boolean): If True, it means the checkpoint was
                already found and we don't need to search for it i.e. the
                corrupted entry is a directory.

        Raises:
            IOError: chown or chmod operations failed
        """
        if self.archive.d2t:
            return

        self.set_status("updating file access")
        t0 = time.time()
        oper = 'query'
        metahandler = MetaHandler()
        fs = self.archive.fs_src

        for fentry in self.archive.files():
            # If backup operation and time window specified then update only matching ones
            if self.oper == self.config.BACKUP_OP:
                if self.archive.header['twindow_type'] and self.archive.header['twindow_val']:
                    dfile = dict(zip(self.archive.header['file_meta'], fentry[2:]))
                    twindow_sec = int(self.archive.header['twindow_val'])
                    tentry_sec = int(float(dfile[self.archive.header['twindow_type']]))

                    if tentry_sec < twindow_sec:
                        continue

            # Search for the recovery checkpoint
            if err_entry and not found_checkpoint:
                if fentry != err_entry:
                    continue
                else:
                    found_checkpoint = True

            __, surl = self.archive.get_endpoints(fentry[1])
            url = client.URL(surl)
            dict_meta = dict(zip(self.archive.header['file_meta'], fentry[2:]))

            # Send the chown async request
            arg = ''.join([url.path, "?eos.ruid=0&eos.rgid=0&mgm.pcmd=chown&uid=",
                           dict_meta['uid'], "&gid=", dict_meta['gid']])
            xrd_st = fs.query(QueryCode.OPAQUEFILE, arg,
                              callback=metahandler.register(oper, surl))

            if not xrd_st.ok:
                # Drain pending callbacks before bailing out
                __ = metahandler.wait(oper)
                err_msg = "Failed query chown for path={0}".format(surl)
                self.logger.error(err_msg)
                raise IOError(err_msg)

            # Send the chmod async request
            mode = int(dict_meta['mode'], 8)  # mode is saved in octal format
            arg = ''.join([url.path, "?eos.ruid=0&eos.rgid=0&mgm.pcmd=chmod&mode=",
                           str(mode)])
            xrd_st = fs.query(QueryCode.OPAQUEFILE, arg,
                              callback=metahandler.register(oper, surl))

            if not xrd_st.ok:
                __ = metahandler.wait(oper)
                err_msg = "Failed query chmod for path={0}".format(surl)
                self.logger.error(err_msg)
                raise IOError(err_msg)

            # Send the utime async request to set the mtime
            mtime = dict_meta['mtime']
            mtime_sec, mtime_nsec = mtime.split('.', 1)
            ctime = dict_meta['ctime']
            ctime_sec, ctime_nsec = ctime.split('.', 1)
            arg = ''.join([url.path, "?eos.ruid=0&eos.rgid=0&mgm.pcmd=utimes",
                           "&tv1_sec=", ctime_sec, "&tv1_nsec=", ctime_nsec,
                           "&tv2_sec=", mtime_sec, "&tv2_nsec=", mtime_nsec])
            xrd_st = fs.query(QueryCode.OPAQUEFILE, arg,
                              callback=metahandler.register(oper, surl))

            if not xrd_st.ok:
                __ = metahandler.wait(oper)
                err_msg = "Failed query utimes for path={0}".format(surl)
                self.logger.error(err_msg)
                raise IOError(err_msg)

        # Wait for all the async metadata requests to complete
        status = metahandler.wait(oper)

        if status:
            t1 = time.time()
            self.logger.info("TIMING_update_file_access={0} sec".format(t1 - t0))
        else:
            err_msg = "Failed update file access"
            self.logger.error(err_msg)
            raise IOError(err_msg)

    def check_previous_tx(self):
        """ Find checkpoint for a previous run. There are two types of checks
        being done:
        - transfer check = verify that the files exist and have the correct
        size and checksum
        - metadata check = verify that all the entries have the correct meta-
        data values set

        Returns:
            (tx_ok, meta_ok, lst_failed): Tuple holding the status of the
            different checks and the list of corrupted entries.
""" msg = "verify last run" self.set_status(msg) meta_ok = False # Check for existence, file size and checksum tx_ok, lst_failed = self.archive.verify(False, True) if tx_ok: meta_ok, lst_failed = self.archive.verify(False, False) if meta_ok: self.do_retry = False raise NoErrorException() # Delete the corrupted entry if this is a real transfer error if not tx_ok: err_entry = lst_failed[0] is_dir = (err_entry[0] == 'd') self.logger.info("Delete corrupted entry={0}".format(err_entry)) if is_dir: self.archive.del_subtree(err_entry[1], None) else: self.archive.del_entry(err_entry[1], False, None) return (tx_ok, meta_ok, lst_failed) def prepare2get(self, err_entry=None, found_checkpoint=False): """This method is only executed for GET operations and its purpose is to issue the Prepapre2Get commands for the files in the archive which will later on be copied back to EOS. Args: err_entry (list): Entry record from the archive file corresponding to the first file/dir that was corrupted. found_checkpoint (bool): If True it means the checkpoint was already found and we don't need to search for it. Raises: IOError: The Prepare2Get request failed. 
""" if self.archive.d2t: return count = 0 limit = 50 # max files per prepare request oper = 'prepare' self.set_status("prepare2get") t0 = time.time() lpaths = [] status = True metahandler = MetaHandler() for fentry in self.archive.files(): # Find error checkpoint if not already found if err_entry and not found_checkpoint: if fentry != err_entry: continue else: found_checkpoint = True count += 1 surl, __ = self.archive.get_endpoints(fentry[1]) lpaths.append(surl[surl.rfind('//') + 1:]) if len(lpaths) == limit: xrd_st = self.archive.fs_dst.prepare(lpaths, PrepareFlags.STAGE, callback=metahandler.register(oper, surl)) if not xrd_st.ok: __ = metahandler.wait(oper) err_msg = "Failed prepare2get for path={0}".format(surl) self.logger.error(err_msg) raise IOError(err_msg) # Wait for batch to be executed del lpaths[:] status = status and metahandler.wait(oper) self.logger.debug(("Prepare2get done count={0}/{1}" "").format(count, self.archive.header['num_files'])) if not status: break # Send the remaining requests if lpaths and status: xrd_st = self.archive.fs_dst.prepare(lpaths, PrepareFlags.STAGE, callback=metahandler.register(oper, surl)) if not xrd_st.ok: __ = metahandler.wait(oper) err_msg = "Failed prepare2get" self.logger.error(err_msg) raise IOError(err_msg) # Wait for batch to be executed del lpaths[:] status = status and metahandler.wait(oper) if status: t1 = time.time() self.logger.info("TIMING_prepare2get={0} sec".format(t1 - t0)) else: err_msg = "Failed prepare2get" self.logger.error(err_msg) raise IOError(err_msg) # Wait for all the files to be on disk for fentry in self.archive.files(): surl, __ = self.archive.get_endpoints(fentry[1]) url = client.URL(surl) while True: st_stat, resp_stat = self.archive.fs_dst.stat(url.path) if not st_stat.ok: err_msg = "Error stat entry={0}".format(surl) self.logger.error(err_msg) raise IOError() # Check if file is on disk if resp_stat.flags & StatInfoFlags.OFFLINE: self.logger.info("Sleep 5 seconds, file not on disk 
entry={0}".format(surl)) sleep(5) else: break self.logger.info("Finished prepare2get, all files are on disk") def evict_disk_cache(self): """ Send a prepare eviect request to the CTA so that the files are removed from the disk cached of the tape system. """ batch_size = 100 timeout = 10 batch = [] # @todo(esindril) use the XRootD proived flag once this is # available in the Python interface xrd_prepare_evict_flag = 0x000100000000 for fentry in self.archive.files(): __, dst = self.archive.get_endpoints(fentry[1]) url = client.URL(dst) batch.append(url.path) if len(batch) == batch_size: fs = self.archive.get_fs(dst) prep_stat, __ = fs.prepare(batch, xrd_prepare_evict_flag, 0, timeout) batch.clear() if not prep_stat.ok: self.logger.warning("Failed prepare evit for batch") if len(batch) != 0: fs = self.archive.get_fs(dst) prep_stat, __ = fs.prepare(batch, xrd_prepare_evict_flag, 0, timeout) batch.clear() if not prep_stat.ok: self.logger.warning("Failed prepare evit for batch") self.logger.info("Finished sending all the prepare evict requests") def wait_on_tape(self): """ Check and wait that all the files are on tape, which in our case means checking the "m" bit. 
If a file is not on tape then suspend the current thread for a period of 5 to 60 seconds but abort if the file fails to be archived on tape afte 24h """ max_timeout_per_entry = int(self.config.ARCHIVE_MAX_TIMEOUT) min_timeout, max_timeout = 5, 60 for fentry in self.archive.files(): start_ts = time.time() __, dst = self.archive.get_endpoints(fentry[1]) url = client.URL(dst) file_on_tape = False while not file_on_tape: st_stat, resp_stat = self.archive.fs_dst.stat(url.path) if not st_stat.ok: err_msg = "Error stat entry={0}".format(dst) self.logger.error(err_msg) raise IOError() # Check file is on tape if resp_stat.size != 0 and not (resp_stat.flags & StatInfoFlags.BACKUP_EXISTS): self.logger.debug("File {0} is not yet on tape".format(dst)) timeout = randrange(min_timeout, max_timeout) self.logger.info("Going to sleep for {0} seconds".format(timeout)) sleep(timeout) if time.time() - start_ts > max_timeout_per_entry: self.logger.notice("Entry not archived within the maximum timeout." " entry={0} archive_max_timeout={1}".format( fentry[1], max_timeout_per_entry)) break else: file_on_tape = True else: file_on_tape = True if not file_on_tape: # Throw exception to re-try the failed transfer raise NotOnTapeException() def backup_prepare(self): """ Prepare requested backup operation. Raises: IOError: Failed to transfer backup file. 
""" # Copy backup file from EOS to the local disk self.logger.info(("Prepare backup copy from {0} to {1}" "").format(self.efile_full, self.tx_file)) eos_fs = client.FileSystem(self.efile_full) st, _ = eos_fs.copy((self.efile_full + "?eos.ruid=0&eos.rgid=0"), self.tx_file, True) if not st.ok: err_msg = ("Failed to copy backup file={0} to local disk at={1} err_msg={2}" "").format(self.efile_full, self.tx_file, st.message) self.logger.error(err_msg) raise IOError(err_msg) # Create the ArchiveFile object for the backup which is similar to a # tape to disk transfer self.archive = ArchiveFile(self.tx_file, False) # Check that the destination directory exists and has mode 777, if # forced then skip checks if not self.force: surl = self.archive.header['dst'] url = client.URL(surl) fs = self.archive.get_fs(surl) st_stat, resp_stat = fs.stat((url.path, + "?eos.ruid=0&eos.rgid=0")) if st_stat.ok: err_msg = ("Failed to stat backup destination url={0}" "").format(surl) self.logger.error(err_msg) raise IOError(err_msg) if resp_stat.flags != (client.StatInfoFlags.IS_READABLE | client.StatInfoFlags.IS_WRITABLE): err_msg = ("Backup destination url={0} must have move 777").format(surl) self.logger.error(err_msg) raise IOError(err_msg) def do_backup(self): """ Perform a backup operation using the provided backup file. 
""" t0 = time.time() indx_dir = 0 # Root owns the .sys.b#.backup.file fs = client.FileSystem(self.efile_full) efile_url = client.URL(self.efile_full) arg = ''.join([efile_url.path, "?eos.ruid=0&eos.rgid=0&mgm.pcmd=chown&uid=0&gid=0"]) xrd_st, __ = fs.query(QueryCode.OPAQUEFILE, arg) if not xrd_st.ok: err_msg = "Failed setting ownership of the backup file: {0}".format(self.efile_full) self.logger.error(err_msg) raise IOError(err_msg) # Create directories for dentry in self.archive.dirs(): # Do special checks for root directory #if dentry[1] == "./": # self.archive.check_root_dir() indx_dir += 1 self.archive.mkdir(dentry) msg = "create dir {0}/{1}".format(indx_dir, self.archive.header['num_dirs']) self.set_status(msg) # Copy files and set metadata information self.copy_files() self.update_file_access() self.set_status("verifying") check_ok, lst_failed = self.archive.verify(True) self.backup_write_status(lst_failed, check_ok) self.set_status("cleaning") self.logger.info("TIMING_transfer={0} sec".format(time.time() - t0)) self.backup_tx_clean() def backup_write_status(self, lst_failed, check_ok): """ Create backup status file which constains the list of failed files to transfer. 
        Args:
            lst_failed (list): List of failed file transfers
            check_ok (boolean): True if verification successful, otherwise false
        """
        if not check_ok:
            # Encode the number of failures in the status file name
            self.logger.error("Failed verification for {0} entries".format(len(lst_failed)))
            fn_status = ''.join([self.efile_root, ".sys.b#.backup.err.",
                                 str(len(lst_failed)), "?eos.ruid=0&eos.rgid=0"])
        else:
            self.logger.info("Backup successful - no errors detected")
            fn_status = ''.join([self.efile_root,
                                 ".sys.b#.backup.done?eos.ruid=0&eos.rgid=0"])

        # Write one line per failed entry into the status file in EOS
        with client.File() as f:
            f.open(fn_status, OpenFlags.UPDATE | OpenFlags.DELETE)
            offset = 0

            for entry in lst_failed:
                buff = "Failed entry={0}\n".format(entry)
                f.write(buff, offset, len(buff))
                offset += len(buff)


================================================
FILE: archive/eosarch/utils.py
================================================
# ------------------------------------------------------------------------------
# File: utils.py
# Author: Elvin-Alin Sindrilaru <esindril@cern.ch>
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# ****************************************************************************** """Module containing helper function for the EOS archiver daemon.""" from __future__ import unicode_literals import logging from XRootD import client from XRootD.client.flags import OpenFlags logger = logging.getLogger("transfer") def seal_path(path, seal_dict={'&': "#AND#"}): """ Seal a path by replacing the key characters in the dictionary with their values so that EOS is happy. Args: path (str): Path to be sealed seal (dict): Seal dictionary Returns: The path transformed using the dictionary mapping. """ for key, val in seal_dict.items(): path = path.replace(key, val) return path def unseal_path(path, seal_dict={"#AND#": '&'}): """ Unseal a path by replacing the key characters in the dictionary with their values so that we are happy. Args: path (str): Path to be unsealed seal (dict): Unseal dictionary Returns: The path transformed using the dictionary mapping. """ for key, val in seal_dict.items(): path = path.replace(key, val) return path def is_version_file(path): """ Check if this is a version file i.e. contains the following prefix: ".sys.v#" Args: path (string): Relative path Returns: True if this is a version file, otherwise false. """ return path.startswith(".sys.v#.") or "/.sys.v#." in path def is_atomic_version_file(path): """ Check if this is a version file i.e. contains the following prefix: ".sys.a#.v#" Args: path (string): Relative path Returns: True if this is an atomic version file, otherwise false. """ return path.startswith(".sys.a#.v#.") or "/.sys.a#.v#." in path def exec_cmd(cmd): """ Execute an EOS /proc/user/ command. Args: cmd (str): Command to execute. Returns: Tuple containing the following elements: (status, stdout, stderr). Status is a boolean value while the rest are string. If data needs to be returned then it's put in stdout and any error messages are in stderr. 
""" logger.debug("Execute: {0}".format(cmd)) status, retc, stdout, stderr = False, "0", "", "" # Execute the command as root if role not already set if cmd.find("eos.ruid=") == -1: if cmd.find('?') == -1: cmd += "?eos.ruid=0&eos.rgid=0" else: cmd += "&eos.ruid=0&eos.rgid=0" with client.File() as f: st, __ = f.open(cmd, OpenFlags.READ) if st.ok: # Read the whole response data = "" off, sz = 0, 4096 st, chunk = f.read(off, sz) if st.ok: while st.ok and len(chunk): off += len(chunk) try: data += chunk.decode("utf-8") except: print("EHEHEHEH not able to decode str... only bytes") st, chunk = f.read(off, sz) lpairs = data.split('&') for elem in lpairs: if "mgm.proc.retc=" in elem: retc = elem[(elem.index('=') + 1):].strip() status = True if (retc == "0") else False elif "mgm.proc.stdout=" in elem: stdout = elem[(elem.index('=') + 1):].strip() stdout = unseal_path(stdout) elif "mgm.proc.stderr=" in elem: stderr = elem[(elem.index('=') + 1):].strip() stderr = unseal_path(stderr) else: stderr = "error reading response for command: {0}".format(cmd) else: stderr = "error sending command: {0}".format(cmd) # logger.debug("Return command: {0}".format((status, stdout, stderr))) return (status, stdout, stderr) def get_entry_info(url, rel_path, tags, is_dir): """ Get file/directory metadata information from EOS. Args: url (XRootD.URL): Full URL to EOS location. rel_path (str): Entry's relative path as saved in the archive file. tags (list): List of tags to look for in the fileinfo result. is_dir (bool): If True entry is a directory, otherwise a file. Returns: A list containing the info corresponding to the tags supplied in the args. Raises: IOError: Fileinfo request can not be submitted. AttributeError: Not all expected tags are provided. KeyError: Extended attribute value is not present. 
""" dinfo = [] finfo = ''.join([url.protocol, "://", url.hostid, "//proc/user/?", "mgm.cmd=fileinfo&mgm.path=", seal_path(url.path), "&mgm.file.info.option=-m"]) (status, stdout, stderr) = exec_cmd(finfo) if not status: err_msg = ("Path={0} failed fileinfo request, msg={1}").format( url.path, stderr) logger.error(err_msg) raise IOError(err_msg) # Extract the path by using the keylength.file value which represents the # size of the path. This is because the path can contain spaces. size_pair, file_pair, tail = stdout.split(' ', 2) sz_key, sz_val = size_pair.split('=', 1) file_key, file_val = file_pair.split('=', 1) if sz_key == "keylength.file" and file_key == "file" : path = file_val path_size = int(sz_val) while path_size > len(path.encode("utf-8")): path_token, tail = tail.split(' ', 1) path += ' ' path += path_token else: err_msg = ("Fileinfo response does not start with keylength.file " "for path").format(url.path) logger.error(err_msg) raise IOError(err_msg) # For the rest we don't expect any surprizes, they shoud be key=val pairs lpairs = tail.split(' ') it_list = iter(lpairs) dict_info, dict_attr = {}, {} # Parse output of fileinfo -m keeping only the required keys for elem in it_list: if '=' not in elem: continue key, value = elem.split('=', 1) if len(value) == 0: continue if key in tags: dict_info[key] = value elif key == "xattrn" and is_dir: xkey, xval = next(it_list).split('=', 1) if xkey != "xattrv": err_msg = ("Dir={0} no value for xattrn={1}").format( url.path, value) logger.error(err_msg) raise KeyError(err_msg) else: dict_attr[value] = xval # For directories add also the xattr dictionary if is_dir and "attr" in tags: dict_info["attr"] = dict_attr if len(dict_info) == len(tags): # Dirs must end with '/' just as the output of EOS fileinfo -d tentry = 'd' if is_dir else 'f' dinfo.extend([tentry, rel_path]) for tag in tags: dinfo.append(dict_info[tag]) else: err_msg = ("Path={0}, not all expected tags found").format(url.path) logger.error(err_msg) raise 
AttributeError(err_msg) return dinfo def set_dir_info(surl, dict_dinfo, excl_xattr): """ Set directory metadata information in EOS. Args: surl (string): Full URL of directory dict_dinfo (dict): Dictionary containsing meta-data information excl_xattr (list): List of excluded extended attributes Raises: IOError: Metadata operation failed. """ url = client.URL(surl) # Change ownership of the directory fsetowner = ''.join([url.protocol, "://", url.hostid, "//proc/user/?", "mgm.cmd=chown&mgm.path=", seal_path(url.path), "&mgm.chown.owner=", dict_dinfo['uid'], ":", dict_dinfo['gid']]) (status, stdout, stderr) = exec_cmd(fsetowner) if not status: err_msg = "Dir={0}, error doing chown, msg={1}".format(url.path, stderr) logger.error(err_msg) raise IOError(err_msg) # Set permission on the directory fchmod = ''.join([url.protocol, "://", url.hostid, "//proc/user/?", "mgm.cmd=chmod&mgm.path=", seal_path(url.path), "&mgm.chmod.mode=", dict_dinfo['mode']]) (status, stdout, stderr) = exec_cmd(fchmod) if not status: err_msg = "Dir={0}, error doing chmod, msg={1}".format(url.path, stderr) logger.error(err_msg) raise IOError(err_msg) # Deal with extended attributes. If all are excluded then don't touch them. 
if "*" in excl_xattr: return # Get all the current xattrs flsattr = ''.join([url.protocol, "://", url.hostid, "//proc/user/?", "mgm.cmd=attr&mgm.subcmd=ls&mgm.path=", seal_path(url.path)]) (status, stdout, stderr) = exec_cmd(flsattr) if not status: err_msg = "Dir={0}, error listing xattrs, msg ={1}".format( url.path, stderr) logger.error(err_msg) raise IOError(err_msg) lattrs = [s.split('=', 1)[0] for s in stdout.splitlines()] for attr in lattrs: # Don't remove the excluded xattrs if attr in excl_xattr: continue frmattr = ''.join([url.protocol, "://", url.hostid, "//proc/user/?", "mgm.cmd=attr&mgm.subcmd=rm&mgm.attr.key=", attr, "&mgm.path=", seal_path(url.path)]) (status, __, stderr) = exec_cmd(frmattr) if not status: err_msg = ("Dir={0} error while removing attr={1}, msg={2}" "").format(url.path, attr, stderr) logger.error(err_msg) raise IOError(err_msg) # Set the expected extended attributes dict_dattr = dict_dinfo['attr'] for key, val in dict_dattr.items(): # Don't set the excluded xattrs if key in excl_xattr: continue if len(val) == 0: continue fsetattr = ''.join([url.protocol, "://", url.hostid, "//proc/user/?", "mgm.cmd=attr&mgm.subcmd=set&mgm.attr.key=", key, "&mgm.attr.value=", val, "&mgm.path=", seal_path(url.path)]) (status, __, stderr) = exec_cmd(fsetattr) if not status: err_msg = "Dir={0}, error setting attr={1}, msg={2}".format( url.path, key, stderr) logger.error(err_msg) raise IOError(err_msg) ================================================ FILE: archive/eosarch_reconstruct.py ================================================ #!/usr/bin/python3 # ------------------------------------------------------------------------------ # File: eosarch_reconstruct.py # Author: Elvin-Alin Sindrilaru # ------------------------------------------------------------------------------ # # ****************************************************************************** # EOS - the CERN Disk Storage System # Copyright (C) 2014 CERN/Switzerland # # This program is free 
software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# ******************************************************************************
"""
This tool can be used to reconstruct an archive file starting from the data
which is actually saved on tape (CASTOR). The tape system needs to have an
XRootD interface. The way it works is that the archive file is constructed
locally and then is uploaded to the specified EOS root directory which must
not exist previously. The archive file is copied to EOS using the filename
.archive.purge.done so that the user is then able to get the data from the
tape system back into EOS. The UID which is provided when launching the
command is given permission to execute archive operations on the
corresponding EOS directory.
"""
from __future__ import print_function
import sys
import os
import ast
import errno
import stat
import time
import argparse
import tempfile
from eosarch.utils import exec_cmd, set_dir_info

try:
    from XRootD import client
    from XRootD.client.flags import DirListFlags, StatInfoFlags, QueryCode
except ImportError as ierr:
    # NOTE(review): only prints a message and continues; later references to
    # `client` will fail with NameError - confirm whether an exit is intended
    print("Missing xrootd-python package", file=sys.stderr)


class EosAccessException(Exception):
    """ Exception raised when the current user does not have full sudo rights
    EOS to perform the necessary operation for the archiving reconstruct.
    """
    pass


class TapeAccessException(Exception):
    """ Exception raised when the current user can not access information
    from the tape system.
""" pass class ArchReconstruct(object): """ Class responsible for reconstructing the archive file from an already directory subtree from tape. """ def __init__(self, surl, durl, args): """ Initialize the ArchReconstruct object Args: surl (XRootD.URL): URL to tape backend (CASTOR) durl (XRootD.URL): URL to disk destination (EOS) args (Namespace): Namespace object containing at least the following attributes: uid (string): UID of archive owner in numeric format gid (string): GID of archive owner in numeric format svc_class (string): Service class used for retrieving the archived data skip_no_xs (bool): Skip files that don't have a checksum """ self.src_url = surl self.dst_url = durl self.uid, self.gid = args.uid, args.gid self.svc_class = args.svc_class self.skip_no_xs = args.skip_no_xs self.ffiles = tempfile.TemporaryFile(mode='w+') self.fdirs = tempfile.TemporaryFile(mode='w+') self.farchive = tempfile.NamedTemporaryFile(mode='w+', delete=False) print("Temp. archive file saved in: {0}".format(self.farchive.name), file=sys.stdout) def __del__(self): """ Destructor - make sure we close the temporary files """ self.ffiles.close() self.fdirs.close() self.farchive.close() def breadth_first(self): """ Traverse the filesystem subtree using breadth-first search and collect the directory information and file information into separate files which will be merged in the end. 
""" # Dir format: type, rel_path, uid, gid, mode, attr dir_meta = "[\"uid\", \"gid\", \"mode\", \"attr\"]" dir_format = "[\"d\", \"{0}\", \"{1}\", \"{2}\", \"{3}\", {4}]" # File format: type, rel_path, size, mtime, ctime, uid, gid, mode, xstype, xs # Fake mtime and ctime subsecond precision file_meta = ("[\"size\", \"mtime\", \"ctime\", \"uid\", \"gid\", \"mode\", " "\"xstype\", \"xs\"]") file_format = ("[\"f\", \"{0}\", \"{1}\", \"{2}.0\", \"{3}.0\", \"{4}\", " "\"{5}\", \"{6}\", \"{7}\", \"{8}\"]") # Attrs for 2 replica layout in EOS with current user the only one # allowed to trigger archiving operations replica_attr = ("{{\"sys.acl\": \"u:{0}:a,z:i\", " "\"sys.forced.blockchecksum\": \"crc32c\", " "\"sys.forced.blocksize\": \"4k\", " "\"sys.forced.checksum\": \"adler\", " "\"sys.forced.layout\": \"replica\", " "\"sys.forced.nstripes\": \"2\", " "\"sys.forced.space\": \"default\"}}").format(self.uid) num_files, num_dirs = 0, 0 fs = client.FileSystem(str(self.src_url)) # Add root directory which is a bit special and set its metadata # Dir mode is 42755 and file mode is 0644 dir_mode = oct(stat.S_IFDIR | stat.S_ISGID | stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) dir_mode = dir_mode[1:] # remove leading 0 used for octal format file_mode = oct(stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) print(dir_format.format("./", self.uid, self.gid, dir_mode, replica_attr), file=self.fdirs) dict_attr = ast.literal_eval(replica_attr) dict_dinfo = dict(zip(["uid", "gid", "mode", "attr"], [self.uid, self.gid, dir_mode, dict_attr])) set_dir_info(str(self.dst_url), dict_dinfo, list()) root_path = self.src_url.path lst_dirs = [root_path] while lst_dirs: path = lst_dirs.pop(0) st, listing = fs.dirlist(path, DirListFlags.STAT) if not st.ok: msg = "Failed to list dir={0}".format(self.src_url.path) raise TapeAccessException(msg) for elem in listing: if elem.name == ".archive.init": msg = ("Trying to reconstruct an already existing archive " "in 
directory: {0}").format(path) raise TapeAccessException(msg) if elem.statinfo.flags & StatInfoFlags.IS_DIR: num_dirs += 1 full_path = ''.join([path, elem.name, '/']) rel_path = full_path.replace(root_path, "") lst_dirs.append(full_path) print(dir_format.format(rel_path, self.uid, self.gid, dir_mode, replica_attr), file=self.fdirs) else: full_path = ''.join([path, elem.name]) rel_path = full_path.replace(root_path, "") st, xs_resp = fs.query(QueryCode.CHECKSUM, full_path) if not st.ok: # If requested then skip the files that don't have a checksum if self.skip_no_xs: continue msg = "File={0} failed xs query".format(full_path) raise TapeAccessException(msg) num_files += 1 # Result has an annoying \x00 character at the end and it # contains the xs type (adler32) and the xs value resp = xs_resp.strip('\x00\0\n ').split() # If checksum value is not 8 char long then we need padding if len(resp[1]) != 8 : resp[1] = "{0:0>8}".format(resp[1]) if resp[0] != "adler32": msg = ("Unknown checksum type={0} from tape system" "".format(resp[0])) raise TapeAccessException(msg) print(file_format.format(rel_path, elem.statinfo.size, elem.statinfo.modtime, elem.statinfo.modtime, self.uid, self.gid, file_mode, "adler", resp[1]), file=self.ffiles) # Write archive file header header_format = ("{{\"src\": \"{0}\", " "\"dst\": \"{1}\", " "\"svc_class\": \"{2}\", " "\"dir_meta\": {3}, " "\"file_meta\": {4}, " "\"num_dirs\": {5}, " "\"num_files\": {6}, " "\"uid\": \"{7}\", " "\"gid\": \"{8}\", " "\"timestamp\": \"{9}\"}}") print(header_format.format(str(self.dst_url), str(self.src_url), self.svc_class, dir_meta, file_meta, num_dirs, num_files, self.uid, self.gid, time.time()), file=self.farchive, end="\n") # Rewind to the begining of the tmp files self.fdirs.seek(0) self.ffiles.seek(0) # Write directories for line in self.fdirs: print(line, file=self.farchive, end="") # Write files for line in self.ffiles: print(line, file=self.farchive, end="") self.farchive.close() def upload_archive(self): 
""" Upload archive file to EOS directory. Note that we save it the the name .archive.purge since this is the only possible operation when we do such a reconstruct. """ cp = client.CopyProcess() dst = ''.join([str(self.dst_url), ".archive.purge.done?eos.ruid=0&eos.rgid=0"]) cp.add_job(self.farchive.name, dst, force=True) status = cp.prepare() if not status.ok: msg = "Failed while preparing to upload archive file to EOS" raise EosAccessException(msg) status = cp.run() if not status.ok: msg = "Failed while copying the archive file to EOS" raise EosAccessException(msg) else: # Delete local archive file try: os.remove(self.farchive.name) except OSError as __: pass def check_eos_access(url): """ Check that the current user executing the programm is mapped as root in EOS otherwise he will not be able to set all the necessary attributes for the newly built archive. Make sure also that the root destination does not exist already. Args: url (XRootD.URL): EOS URL to the destination path Raises: EosAccessException """ fwhoami = ''.join([url.protocol, "://", url.hostid, "//proc/user/?mgm.cmd=whoami"]) (status, out, __) = exec_cmd(fwhoami) if not status: msg = "Failed to execute EOS whoami command" raise EosAccessException(msg) # Extrach the uid and gid from the response out.strip("\0\n ") lst = out.split(' ') try: for token in lst: if token.startswith("uid="): uid = int(token[4:]) elif token.startswith("gid="): gid = int(token[4:]) except ValueError as __: msg = "Failed while parsing uid/gid response to EOS whoami command" raise EosAccessException(msg) if uid != 0 or gid != 0: msg = "User {0} does not have full rights in EOS - aborting".format(os.getuid()) raise EosAccessException(msg) # Check that root directory does not exist already fs = client.FileSystem(str(url)) st, __ = fs.stat(url.path) if st.ok: msg = "EOS root directory already exists" raise EosAccessException(msg) fmkdir = ''.join([url.protocol, "://", url.hostid, "//proc/user/?mgm.cmd=mkdir&" "mgm.path=", url.path]) 
    (status, __, __) = exec_cmd(fmkdir)

    if not status:
        msg = "Failed to create EOS directory: {0}".format(url.path)
        raise EosAccessException(msg)


def main():
    """ Main function """
    parser = argparse.ArgumentParser(description="Tool used to create an archive "
                                     "file from an already existing archvie such "
                                     "that the recall of the files can be done "
                                     "using the EOS archiving tool. The files are "
                                     "copied back to EOS using the 2replica layout.")
    parser.add_argument("-s", "--src", required=True,
                        help="XRootD URL to archive tape source (CASTOR location)")
    parser.add_argument("-d", "--dst", required=True,
                        help="XRootD URL to archive disk destination (EOS location)")
    parser.add_argument("-c", "--svc_class", default="default",
                        help="Service class used for getting the files from tape")
    parser.add_argument("-u", "--uid", default="0", help="User UID (numeric)")
    parser.add_argument("-g", "--gid", default="0", help="User GID (numeric)")
    parser.add_argument("-x", "--skip_no_xs", default=False, action="store_true",
                        help="Skip files that don't have a checksum")
    args = parser.parse_args()

    # UID/GID must parse as integers
    try:
        int(args.uid)
        int(args.gid)
    except ValueError as __:
        print("Error: UID/GID must be in numeric format", file=sys.stderr)
        exit(errno.EINVAL)

    # Make sure the source and destination are directories
    if args.src[-1] != '/':
        args.src += '/'

    if args.dst[-1] != '/':
        args.dst += '/'

    # Check the source and destination are valid XRootD URLs
    url_dst = client.URL(args.dst)
    url_src = client.URL(args.src)

    if not url_dst.is_valid() or not url_src.is_valid():
        print("Error: Destination/Source URL is not valid", file=sys.stderr)
        exit(errno.EINVAL)

    # Refuse localhost aliases since the URLs end up in the archive file and
    # must be resolvable from other hosts
    avoid_local = ["localhost", "localhost4", "localhost6",
                   "localhost.localdomain", "localhost4.localdomain4",
                   "localhost6.localdomain6"]

    if url_dst.hostname in avoid_local or url_src.hostname in avoid_local:
        print("Please use FQDNs in the XRootD URLs", file=sys.stderr)
        exit(errno.EINVAL)

    try:
        check_eos_access(url_dst)
    except EosAccessException as err:
        print("Error: {0}".format(str(err)), file=sys.stderr)
        exit(errno.EPERM)

    archr = ArchReconstruct(url_src, url_dst, args)

    try:
        archr.breadth_first()
        archr.upload_archive()
    except (TapeAccessException, IOError) as err:
        print("Error: {0}".format(str(err)), file=sys.stderr)
        exit(errno.EIO)


if __name__ == '__main__':
    main()


================================================
FILE: archive/eosarch_run.py
================================================
#!/usr/bin/python3
# ------------------------------------------------------------------------------
# File: eosarch_run.py
# Author: Elvin-Alin Sindrilaru <esindril@cern.ch>
# ------------------------------------------------------------------------------
#
# ******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# ******************************************************************************
"""
Script used for starting an archiving transfer in a subprocess which also
closes the open file descriptors such that there is no interference between
the processes using ZMQ.
""" from __future__ import unicode_literals from __future__ import print_function import ast import sys import os import logging from errno import EIO, EINVAL from hashlib import sha256 # Note: this is to be enabled only when we want to get the logging from the # XrdCl - notice that this can grow very big, very fast. We also have to do # this here before the XrdCl module gets initialised. #os.environ['XRD_LOGLEVEL'] = "Debug" #os.environ['XRD_LOGFILE'] = "/tmp/eosarch_xrdcl.log" from eosarch import Transfer, NoErrorException, Configuration try: config = Configuration() except Exception as err: print("Configuration failed, error:{0}".format(err), file=sys.stderr) raise # Set location for local transfer files for oper in [config.GET_OP, config.PUT_OP, config.PURGE_OP, config.DELETE_OP, config.BACKUP_OP]: path = config.EOS_ARCHIVE_DIR + oper + '/' config.DIR[oper] = path req_dict = ast.literal_eval(sys.argv[1]) src = req_dict['src'] pos = src.find("//", src.find("//") + 1) + 1 root_dir = src[pos : src.rfind('/') + 1] uuid = sha256(root_dir.encode()).hexdigest() log_file = config.DIR[req_dict['cmd']] + uuid + ".log" config.start_logging("transfer", log_file, False) try: tx = Transfer(req_dict, config) except Exception as err: config.logger.exception(err) raise try: tx.run() except IOError as err: print("{0}".format(err), file=sys.stderr) tx.logger.exception(err) tx.tx_clean(False) sys.exit(EIO) except NoErrorException as err: tx.tx_clean(True) except Exception as err: print("{0}".format(err), file=sys.stderr) tx.logger.exception(err) tx.tx_clean(False) sys.exit(EINVAL) ================================================ FILE: archive/eosarchived.conf ================================================ # ------------------------------------------------------------------------------ # File: eosarchiverd.conf # Author: Elvin-Alin Sindrilaru # ------------------------------------------------------------------------------ # # 
******************************************************************************
# EOS - the CERN Disk Storage System
# Copyright (C) 2014 CERN/Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
# ******************************************************************************

# Log level can be one of the following (it uses the syslog convention):
# info, notice, warn/warning, err/error, emerg/panic, debug, crit/critical, alert
LOG_LEVEL=debug

# Max number of transfers that can run in parallel
MAX_TRANSFERS=10

# Max number of transfers to be performed by one thread
BATCH_SIZE=10

# Max number of threads used per transfer process
MAX_THREADS=5

# Max number of retries for a batch of jobs that have failed. This is used to
# protect against transient failures, so that the user doesn't have to babysit
# the entire transfer.
MAX_RETRIES=5

# Poll timeout in milliseconds - period after which the master requests on its
# own for an update from the workers if there are no requests in the mean time.
# This also has the role to join the worker processes which have finished
# in the mean time and print their returncode.
POLL_TIMEOUT=30000

# Join timeout in seconds for running threads inside a process
JOIN_TIMEOUT=1

# Maximum timeout value in seconds for a file entry to be migrated to tape.
# When this timeout expires the transfer process is retried. By default this
# is 86400 seconds (1 day).
#ARCHIVE_MAX_TIMEOUT=86400 ================================================ FILE: archive/eosarchived.py ================================================ #!/usr/bin/python3 # ------------------------------------------------------------------------------ # File: eosarchived.py # Author: Elvin-Alin Sindrilaru # ------------------------------------------------------------------------------ # # ****************************************************************************** # EOS - the CERN Disk Storage System # Copyright (C) 2014 CERN/Switzerland # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # ****************************************************************************** """Module running the eosarchiverd daemon which transfers files between EOS and CASTOR. 
""" from __future__ import unicode_literals from __future__ import print_function import os import sys import zmq import stat import subprocess import ast import logging import time import logging.handlers from eosarch import ProcessInfo, Configuration class Dispatcher(object): """ Dispatcher daemon responsible for receiving requests from the clients and then spawning the proper executing process for archiving operations Attributes: procs (dict): Dictionary containing the currently running processes """ def __init__(self, config): self.config = config self.logger = logging.getLogger("dispatcher") self.procs = {} self.pending = {} self.backend_req, self.backend_pub, self.backend_poller = None, None, None def run(self): """ Server entry point which is responsible for spawning worker proceesses that do the actual transfers (put/get). """ # Set the triggers for different types of commands trigger = {self.config.PUT_OP: self.start_transfer, self.config.GET_OP: self.start_transfer, self.config.DELETE_OP: self.start_transfer, self.config.PURGE_OP: self.start_transfer, self.config.BACKUP_OP: self.start_transfer, self.config.TX_OP: self.do_show_transfers, self.config.KILL_OP: self.do_kill, self.config.STATS: self.get_stats} ctx = zmq.Context.instance() self.logger.info("Started dispatcher process ...") # Socket used for communication with EOS MGM frontend = ctx.socket(zmq.REP) addr = "ipc://" + self.config.FRONTEND_IPC frontend.bind(addr) # Socket used for communication with worker processes self.backend_req = ctx.socket(zmq.ROUTER) addr = "ipc://" + self.config.BACKEND_REQ_IPC self.backend_req.bind(addr) self.backend_pub = ctx.socket(zmq.PUB) addr = "ipc://" + self.config.BACKEND_PUB_IPC self.backend_pub.bind(addr) self.backend_poller = zmq.Poller() self.backend_poller.register(self.backend_req, zmq.POLLIN) mgm_poller = zmq.Poller() mgm_poller.register(frontend, zmq.POLLIN) time.sleep(1) # Attach orphan processes which may be running before starting the daemon 
self.get_orphans() while True: events = dict(mgm_poller.poll(self.config.POLL_TIMEOUT)) self.update_status() if events and events.get(frontend) == zmq.POLLIN: try: req_json = frontend.recv_json() except zmq.ZMQError as err: if err.errno == zmq.ETERM: break # shutting down, exit else: raise except ValueError as err: self.logger.error("Command in not in JSON format") frontend.send("ERROR error:command not in JSON format") continue self.logger.debug("Received command: {0}".format(req_json)) try: reply = trigger[req_json['cmd']](req_json) except KeyError as err: self.logger.error("Unknown command type: {0}".format(req_json['cmd'])) reply = "ERROR error: operation not supported" raise frontend.send_string(reply) def get_orphans(self): """ Get orphan transfer processes from previous runs of the daemon """ self.logger.info("Get orphans") tries = 0 num = self.num_processes() # Get status for orphan processes while len(self.procs) != num and tries < 10: tries += 1 self.procs.clear() num = self.num_processes() self.backend_pub.send_multipart([b"[MASTER]", b"{'cmd': 'orphan_status'}"]) while True: events = dict(self.backend_poller.poll(1000)) if events and events.get(self.backend_req) == zmq.POLLIN: [__, resp] = self.backend_req.recv_multipart() self.logger.info("Received response: {0}".format(resp)) # Convert response to python dictionary dict_resp = ast.literal_eval(resp.decode()) if not isinstance(dict_resp, dict): err_msg = "Response={0} is not a dictionary".format(resp) self.logger.error(err_msg) continue pinfo = ProcessInfo(None) pinfo.update(dict_resp) if pinfo.uuid not in self.procs: self.procs[pinfo.uuid] = pinfo else: # TIMEOUT self.logger.info("Get orphans status timeout") break self.logger.debug(("Try={0}, got {1}/{2} orphan processe responses" "").format(tries, len(self.procs), num)) def num_processes(self): """ Get the number of running archive processes on the current system by executing the ps command Returns: Number of running processes Raises: ValueError in 
case the output of ps is not a valid pid number """ pid = os.getpid() # TODO: make the resolution of the eosarch_run.py more elegant exec_fname = "eosarch_run.py" ps_proc = subprocess.Popen([("ps -eo pid,ppid,comm | egrep \"{0}\$\" | " "awk '{{print $1}}'").format(exec_fname)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) ps_out, __ = ps_proc.communicate() if len(ps_out) == 0: return 0 ps_out = ps_out.strip('\0\n') proc_lst = ps_out.split('\n') try: num = len([x for x in proc_lst if pid != int(x)]) except ValueError as __: self.logger.error("ps output x={0} is not a valid pid value".format(x)) raise return num def update_status(self): """ Update the status of the processes """ self.backend_pub.send_multipart([b"[MASTER]", b"{'cmd': 'status'}"]) recv_uuid = [] while len(recv_uuid) < len(self.procs): events = dict(self.backend_poller.poll(400)) if events and events.get(self.backend_req) == zmq.POLLIN: [__, resp] = self.backend_req.recv_multipart() self.logger.debug("Received response: {0}".format(resp)) # Convert response to python dictionary dict_resp = ast.literal_eval(resp.decode()) if not isinstance(dict_resp, dict): self.logger.error("Response is not a dictionary") continue # Update the local info about the process try: self.procs[dict_resp['uuid']].update(dict_resp) except KeyError as __: err_msg = ("Unknown process response:{0}").format(dict_resp) self.logger.error(err_msg) recv_uuid.append(dict_resp['uuid']) else: # TIMEOUT self.logger.debug("Update status timeout") break # Check if processes that didn't respond are still alive unresp = [proc for (uuid, proc) in self.procs.items() if uuid not in recv_uuid] for pinfo in unresp: if not pinfo.is_alive(): del self.procs[pinfo.uuid] # Submit any pending transfers while the limit is not exceeded while len(self.procs) < self.config.MAX_TRANSFERS and self.pending: (__, pinfo) = self.pending.popitem() # take the oldest one # Don't pipe stdout and stderr as we log all the output pinfo.proc = 
subprocess.Popen(['/usr/bin/eosarch_run.py', "{0}".format(pinfo.orig_req)], close_fds=True) pinfo.pid = pinfo.proc.pid self.procs[pinfo.uuid] = pinfo def start_transfer(self, req_json): """ Start new transfer Args: req_json (json): New transfer information which must include: { cmd: get/put/delete/purge/backup, src: full URL to archive file in EOS. opt: retry | '' uid: client uid gid: client gid } Returns: A message which is sent to the EOS MGM informing about the status of the request. """ self.logger.debug("Start transfer {0}".format(req_json)) pinfo = ProcessInfo(req_json) self.logger.debug("Creating job={0}, path={1}".format(pinfo.uuid, pinfo.root_dir)) if pinfo.uuid in self.procs: err_msg = "Job with same uuid={0} already exists".format(pinfo.uuid) self.logger.error(err_msg) return "ERROR error: job with same signature exists" if len(self.procs) >= self.config.MAX_TRANSFERS: self.logger.warning("Maximum number of concurrent transfers reached, " "adding job={0} to the pending list".format(pinfo.uuid)) self.pending[pinfo.uuid] = pinfo return "OK Id={0} added to the pending list".format(pinfo.uuid) # Don't pipe stdout and stderr as we log all the output pinfo.proc = subprocess.Popen(['/usr/bin/eosarch_run.py', "{0}".format(req_json)], close_fds=True) pinfo.pid = pinfo.proc.pid self.procs[pinfo.uuid] = pinfo return "OK Id={0}".format(pinfo.uuid) def do_show_transfers(self, req_json): """ Show onging transfers Args: req_json (JSON): Command in JSON format include: { cmd: transfers, opt: all/get/put/purge/delete/uuid, uid: uid, gid: gid } Returns: String with the result of the listing """ msg = "OK " row_data, proc_list = [], [] ls_type = req_json['opt'] self.logger.debug("Show transfers type={0}".format(ls_type)) if ls_type == "all": proc_list = [*self.procs.values(),*self.pending.values()] elif ls_type in self.procs: # ls_type is a transfer uuid proc_list.append(self.procs[ls_type]) else: proc_list = [elem for elem in self.procs.values() if elem.op == ls_type] 
proc_list.extend([elem for elem in self.pending.values() if elem.op == ls_type]) for proc in proc_list: line = ("date={0},uuid={1},path={2},op={3},status={4}".format( time.asctime(time.localtime(proc.timestamp)), proc.uuid, proc.orig_req['src'], proc.op, proc.status)) msg = '\n'.join([msg, line]) return msg def do_kill(self, req_json): """ Kill transfer. Args: req_json (JSON command): Arguments for kill command include: { cmd: kill, opt: uuid, uid: uid, gid: gid } """ msg = "OK" job_uuid = req_json['opt'] uid, gid = int(req_json['uid']), int(req_json['gid']) try: proc = self.procs[job_uuid] except KeyError as __: msg = "ERROR error: job not found" return msg if (uid == 0 or uid == proc.uid or (uid != proc.uid and gid == proc.gid)): self.logger.debug("Kill uuid={0} pid={1}".format(job_uuid, proc.pid)) kill_proc = subprocess.Popen(['kill', '-SIGTERM', str(proc.pid)], stdout=subprocess.PIPE, stderr=subprocess.PIPE) _, err = kill_proc.communicate() if kill_proc.returncode: msg = "ERROR error:" + err else: self.logger.error(("User uid/gid={0}/{1} permission denied to kill job " "with uid/gid={2}/{3}").format(uid, gid, proc.uid, proc.gid)) msg = "ERROR error: Permission denied - you are not owner of the job" self.logger.debug("Kill pid={0}, msg={0}".format(proc.pid, msg)) return msg def get_stats(self, req_json): """ Get archive daemon stats info. 
Args: req_json (JSON command): Arguments for stats command include: { cmd: stats, opt: \"\", uid: uid, gid: gid } Returns: string containing information about number of slots """ return "OK max={0} running={1} pending={2}".format( self.config.MAX_TRANSFERS, len(self.procs), len(self.pending)) def main(): """ Main function """ try: config = Configuration() except Exception as err: print("Configuration failed, error:{0}".format(err), file=sys.stderr) raise config.start_logging("dispatcher", config.LOG_FILE, True) logger = logging.getLogger("dispatcher") config.display() config.DIR = {} # Create the local directory structure in /var/eos/archive/ # i.e /var/eos/archive/get/, /var/eos/archive/put/ etc. for oper in [config.GET_OP, config.PUT_OP, config.PURGE_OP, config.DELETE_OP, config.BACKUP_OP]: path = config.EOS_ARCHIVE_DIR + oper + '/' config.DIR[oper] = path try: os.mkdir(path) except OSError as __: pass # directory exists # Prepare ZMQ IPC files os.umask(0o002) # set files with 775 by default for ipc_file in [config.FRONTEND_IPC, config.BACKEND_REQ_IPC, config.BACKEND_PUB_IPC]: if not os.path.exists(ipc_file): try: open(ipc_file, 'w').close() os.chmod(ipc_file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) except OSError as err: err_msg = ("Failed setting permissions on the IPC socket" " file={0}").format(ipc_file) logger.error(err_msg) raise except IOError as err: err_msg = ("Failed creating IPC socket file={0}").format(ipc_file) logger.error(err_msg) raise # Create dispatcher object dispatcher = Dispatcher(config) try: dispatcher.run() except Exception as err: logger.exception(err) if __name__ == '__main__': try: main() except ValueError as __: # This is to deal with exceptions thrown when trying to close the log # file which is already deleted manually by an exterior process pass ================================================ FILE: archive/eosarchived.service ================================================ # 
---------------------------------------------------------------------- # File: eosarchived.service # Author: Elvin Sindrilaru - CERN # ---------------------------------------------------------------------- # # ************************************************************************ # * EOS - the CERN Disk Storage System * # * Copyright (C) 2018 CERN/Switzerland * # * * # * This program is free software: you can redistribute it and/or modify * # * it under the terms of the GNU General Public License as published by * # * the Free Software Foundation, either version 3 of the License, or * # * (at your option) any later version. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU General Public License for more details. * # * * # * You should have received a copy of the GNU General Public License * # * along with this program. 
If not, see .* # ************************************************************************ [Unit] Description=EOS archiver daemon After=network-online.target local-fs.target Wants=network-online.target local-fs.target [Service] WorkingDirectory=/var/eos/ #LogsDirectory=eos/archive/ EnvironmentFile=/etc/sysconfig/eosarchived_env ExecStart=/usr/bin/eosarchived.py Type=simple User=eosarchi Group=daemon Restart=on-abort RestartSec=5 LimitNOFILE=65000 KillMode=mixed SuccessExitStatus=KILL ================================================ FILE: archive/eosarchived_env.sysconfig ================================================ # Options for the eosarchived daemon # EOSARCHIVED_OPTIONS= # Enable core dumping DAEMON_COREFILE_LIMIT="unlimited" # Directory where log files are saved LOG_DIR="/var/log/eos/archive/" # This directory must match the one set in xrd.cf.mgm as it is used for the # communication between the MGM and the eosarchived daemon EOS_ARCHIVE_DIR=/var/eos/archive/ # Configuration file which can be modified while the daemon is running and # whose changes are automatically picked up by new transfers EOS_ARCHIVE_CONF=/etc/eosarchived.conf # This is the location of the archive keytab file containing just **one** entry # for the user account under which the eosarchived daemon is running. The same # entry need to be present in the eos.keytab file so that the eosarchived can # have full access to the EOS. 
XrdSecSSSKT=/etc/archive.keytab # Make eos-xrootd python bindings higher priority PYTHONPATH=/opt/eos/xrootd/lib64/python3.6/site-packages/ ================================================ FILE: archive/opt-eos-xrootd.pth ================================================ /opt/eos/xrootd/lib64/python3.6/site-packages/ ================================================ FILE: auth_plugin/CMakeLists.txt ================================================ #------------------------------------------------------------------------------- # File: CMakeLists.txt # Author: Elvin-Alin Sindrilaru CERN #------------------------------------------------------------------------------- # ************************************************************************ # * EOS - the CERN Disk Storage System * # * Copyright (C) 2013 CERN/Switzerland * # * * # * This program is free software: you can redistribute it and/or modify * # * it under the terms of the GNU General Public License as published by * # * the Free Software Foundation, either version 3 of the License, or * # * (at your option) any later version. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU General Public License for more details. * # * * # * You should have received a copy of the GNU General Public License * # * along with this program. 
If not, see .* # ************************************************************************ #------------------------------------------------------------------------------- # Generate all protocol buffer files #------------------------------------------------------------------------------- PROTOBUF_GENERATE_CPP(XSE_SRCS XSE_HDRS proto/XrdSecEntity.proto) PROTOBUF_GENERATE_CPP(XOEI_SRCS XOEI_HDRS proto/XrdOucErrInfo.proto) PROTOBUF_GENERATE_CPP(XSFS_SRCS XSFS_HDRS proto/XrdSfsFSctl.proto) PROTOBUF_GENERATE_CPP(STAT_SRCS STAT_HDRS proto/Stat.proto) PROTOBUF_GENERATE_CPP(FSCTL1_SRCS FSCTL1_HDRS proto/Fsctl.proto) PROTOBUF_GENERATE_CPP(FSCTL2_SRCS FSCTL2_HDRS proto/FS_ctl.proto) PROTOBUF_GENERATE_CPP(CHMOD_SRCS CHMOD_HDRS proto/Chmod.proto) PROTOBUF_GENERATE_CPP(CHKSUM_SRCS CHKSUM_HDRS proto/Chksum.proto) PROTOBUF_GENERATE_CPP(EXISTS_SRCS EXISTS_HDRS proto/Exists.proto) PROTOBUF_GENERATE_CPP(MKDIR_SRCS MKDIR_HDRS proto/Mkdir.proto) PROTOBUF_GENERATE_CPP(REMDIR_SRCS REMDIR_HDRS proto/Remdir.proto) PROTOBUF_GENERATE_CPP(REM_SRCS REM_HDRS proto/Rem.proto) PROTOBUF_GENERATE_CPP(RENAME_SRCS RENAME_HDRS proto/Rename.proto) PROTOBUF_GENERATE_CPP(XSP_SRCS XSP_HDRS proto/XrdSfsPrep.proto) PROTOBUF_GENERATE_CPP(PREPARE_SRCS PREPARE_HDRS proto/Prepare.proto) PROTOBUF_GENERATE_CPP(TRUNCATE_SRCS TRUNCATE_HDRS proto/Truncate.proto) PROTOBUF_GENERATE_CPP(DOPEN_SRCS DOPEN_HDRS proto/DirOpen.proto) PROTOBUF_GENERATE_CPP(DREAD_SRCS DREAD_HDRS proto/DirRead.proto) PROTOBUF_GENERATE_CPP(DFNAME_SRCS DFNAME_HDRS proto/DirFname.proto) PROTOBUF_GENERATE_CPP(DCLOSE_SRCS DCLOSE_HDRS proto/DirClose.proto) PROTOBUF_GENERATE_CPP(FOPEN_SRCS FOPEN_HDRS proto/FileOpen.proto) PROTOBUF_GENERATE_CPP(FFNAME_SRCS FFNAME_HDRS proto/FileFname.proto) PROTOBUF_GENERATE_CPP(FSTAT_SRCS FSTAT_HDRS proto/FileStat.proto) PROTOBUF_GENERATE_CPP(FREAD_SRCS FREAD_HDRS proto/FileRead.proto) PROTOBUF_GENERATE_CPP(FWRITE_SRCS FWRITE_HDRS proto/FileWrite.proto) PROTOBUF_GENERATE_CPP(FCLOSE_SRCS FCLOSE_HDRS 
proto/FileClose.proto)
PROTOBUF_GENERATE_CPP(REQ_SRCS REQ_HDRS proto/Request.proto)
PROTOBUF_GENERATE_CPP(RESP_SRCS RESP_HDRS proto/Response.proto)

set(AUTH_PROTO_SRCS
  ${XSE_SRCS} ${XOEI_SRCS} ${XSFS_SRCS} ${STAT_SRCS}
  ${FSCTL1_SRCS} ${FSCTL2_SRCS} ${REQ_SRCS} ${RESP_SRCS}
  ${CHMOD_SRCS} ${CHKSUM_SRCS} ${EXISTS_SRCS} ${MKDIR_SRCS}
  ${REMDIR_SRCS} ${REM_SRCS} ${RENAME_SRCS} ${XSP_SRCS}
  ${PREPARE_SRCS} ${TRUNCATE_SRCS} ${DOPEN_SRCS} ${DREAD_SRCS}
  ${DFNAME_SRCS} ${DCLOSE_SRCS} ${FOPEN_SRCS} ${FCLOSE_SRCS}
  ${FFNAME_SRCS} ${FSTAT_SRCS} ${FREAD_SRCS} ${FWRITE_SRCS})

# BUGFIX: ${EXITS_HDRS} was a typo for ${EXISTS_HDRS} - the undefined variable
# expanded to nothing, silently dropping the Exists.proto generated header from
# the list (mirror of ${EXISTS_SRCS} in AUTH_PROTO_SRCS above)
set(AUTH_PROTO_HDRS
  ${XSE_HDRS} ${XOEI_HDRS} ${XSFS_HDRS} ${STAT_HDRS}
  ${FSCTL1_HDRS} ${FSCTL2_HDRS} ${REQ_HDRS} ${RESP_HDRS}
  ${CHMOD_HDRS} ${CHKSUM_HDRS} ${EXISTS_HDRS} ${MKDIR_HDRS}
  ${REMDIR_HDRS} ${REM_HDRS} ${RENAME_HDRS} ${XSP_HDRS}
  ${PREPARE_HDRS} ${TRUNCATE_HDRS} ${DOPEN_HDRS} ${DREAD_HDRS}
  ${DFNAME_HDRS} ${DCLOSE_HDRS} ${FOPEN_HDRS} ${FCLOSE_HDRS}
  ${FFNAME_HDRS} ${FSTAT_HDRS} ${FREAD_HDRS} ${FWRITE_HDRS})

set_source_files_properties(
  ${AUTH_PROTO_SRCS} ${AUTH_PROTO_HDRS}
  PROPERTIES GENERATED 1)

#-------------------------------------------------------------------------------
# EosAuthProto-Objects
#-------------------------------------------------------------------------------
add_library(EosAuthProto-Objects OBJECT
  ProtoUtils.cc ProtoUtils.hh
  ${AUTH_PROTO_SRCS} ${AUTH_PROTO_HDRS})

target_link_libraries(EosAuthProto-Objects PUBLIC
  PROTOBUF::PROTOBUF
  XROOTD::UTILS
  XROOTD::PRIVATE)

# NOTE(review): the two generator expressions below were reduced to "$ $" by
# the extraction (angle-bracket content stripped) - restore the original
# $<...> expressions from upstream before building
target_include_directories(EosAuthProto-Objects PUBLIC
  $ $)

set_target_properties(EosAuthProto-Objects PROPERTIES
  POSITION_INDEPENDENT_CODE TRUE)

#-------------------------------------------------------------------------------
# EosAuthOfs library
#-------------------------------------------------------------------------------
add_library(EosAuthOfs-${XRDPLUGIN_SOVERSION} MODULE
  EosAuthOfs.cc EosAuthOfs.hh
  EosAuthOfsFile.cc EosAuthOfsFile.hh
  EosAuthOfsDirectory.cc EosAuthOfsDirectory.hh)

target_link_libraries(
EosAuthOfs-${XRDPLUGIN_SOVERSION} PRIVATE EosAuthProto-Objects EosCommon ZMQ::ZMQ XROOTD::PRIVATE) install(TARGETS EosAuthOfs-${XRDPLUGIN_SOVERSION} LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}) ================================================ FILE: auth_plugin/EosAuthOfs.cc ================================================ //------------------------------------------------------------------------------ // File: EosAuthOfs.cc // Author: Elvin-Alin Sindrilaru CERN //------------------------------------------------------------------------------ /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2013 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. 
If not, see .* ************************************************************************/ #include #include #include #include #include #include #include "EosAuthOfs.hh" #include "ProtoUtils.hh" #include "EosAuthOfsDirectory.hh" #include "EosAuthOfsFile.hh" #include "common/SymKeys.hh" #include #include #include #include #include #include #include #include #include #include // The global OFS handle eos::auth::EosAuthOfs* eos::auth::gOFS = nullptr; extern XrdSysError OfsEroute; extern XrdOfs* XrdOfsFS; XrdVERSIONINFO(XrdSfsGetFileSystem, AuthOfs); XrdVERSIONINFO(XrdSfsGetFileSystem2, AuthOfs); //------------------------------------------------------------------------------ // Filesystem Plugin factory function //------------------------------------------------------------------------------ extern "C" { //------------------------------------------------------------------------------ //! Filesystem Plugin factory function //! //! @description FileSystem2 version, to allow passing configuration info back //! to XRootD. Configure with: xrootd.fslib -2 libXrdEosMgm.so //! //! @param native_fs (not used) //! @param lp the logger object //! @param configfn the configuration file name //! @param envP pass configuration information back to XrdXrootd //! //! @returns configures and returns our MgmOfs object //------------------------------------------------------------------------------ XrdSfsFileSystem* XrdSfsGetFileSystem2(XrdSfsFileSystem* native_fs, XrdSysLogger* lp, const char* configfn, XrdOucEnv* envP) { if (eos::auth::gOFS) { // File system object already initialized return eos::auth::gOFS; } // Do the herald thing OfsEroute.SetPrefix("AuthOfs_"); OfsEroute.logger(lp); XrdOucString version = "AuthOfs (Object Storage File System) "; version += VERSION; OfsEroute.Say("++++++ (c) 2013 CERN/IT-DSS ", version.c_str()); // Initialize the subsystems eos::auth::gOFS = new eos::auth::EosAuthOfs(); eos::auth::gOFS->ConfigFN = (configfn && *configfn ? 
strdup(configfn) : 0); if (eos::auth::gOFS->Configure(OfsEroute, envP)) { return 0; } XrdOfsFS = eos::auth::gOFS; return eos::auth::gOFS; } //------------------------------------------------------------------------------ //! Filesystem Plugin factory function //! //! @param native_fs (not used) //! @param lp the logger object //! @param configfn the configuration file name //! //! @returns configures and returns our MgmOfs object //------------------------------------------------------------------------------ XrdSfsFileSystem* XrdSfsGetFileSystem(XrdSfsFileSystem* native_fs, XrdSysLogger* lp, const char* configfn) { if (eos::auth::gOFS) { // File system object already initialized OfsEroute.SetPrefix("AuthOfs_"); OfsEroute.logger(lp); OfsEroute.Say("info=\"return already loaded AUTH OFS pointer\""); return eos::auth::gOFS; } return XrdSfsGetFileSystem2(native_fs, lp, configfn, nullptr); } } // extern "C" EOSAUTHNAMESPACE_BEGIN //------------------------------------------------------------------------------ // Constructor //------------------------------------------------------------------------------ EosAuthOfs::EosAuthOfs(): XrdOfs(), eos::common::LogId(), proxy_tid(0), mFrontend(0), mSizePoolSocket(5), mPort(0), mCollapsePort(0), mLogLevel(LOG_INFO) { // Initialise the ZMQ client mZmqContext = new zmq::context_t(1); mBackend = std::make_pair(std::string(""), (zmq::socket_t*)0); // Set Logging parameters XrdOucString unit = "auth@localhost"; // setup the circular in-memory log buffer eos::common::Logging& g_logging = eos::common::Logging::GetInstance(); g_logging.SetLogPriority(mLogLevel); g_logging.SetUnit(unit.c_str()); eos_info("info=\"logging configured\""); } //------------------------------------------------------------------------------ // Destructor //------------------------------------------------------------------------------ EosAuthOfs::~EosAuthOfs() { zmq::socket_t* socket; // Kill the auth proxy thread if (proxy_tid) { XrdSysThread::Cancel(proxy_tid); 
XrdSysThread::Join(proxy_tid, 0); } // Release memory while (mPoolSocket.try_pop(socket)) { delete socket; } delete mFrontend; delete mBackend.second; delete mZmqContext; // Free configuration file name allocated via strdup during initialization if (ConfigFN) { free(ConfigFN); ConfigFN = nullptr; } } //------------------------------------------------------------------------------ // Configure routine //------------------------------------------------------------------------------ int EosAuthOfs::Configure(XrdSysError& error, XrdOucEnv* envP) { int NoGo = 0; int cfgFD; char* var; const char* val; std::string space_tkn; // Configure the basic XrdOfs and exit if not successful NoGo = XrdOfs::Configure(error, envP); eos::common::Logging& g_logging = eos::common::Logging::GetInstance(); if (NoGo) { return NoGo; } mPort = myPort; mCollapsePort = mPort; // by default we collapse on the same AUTH service port on a remote machine // Get the hostname const char* errtext = 0; const char* host_name = XrdNetUtils::MyHostName(0, &errtext); if (!host_name) { error.Emsg("Config", "hostname is invalid : %s", host_name); return 1; } XrdNetAddr* addrs = 0; int nAddrs = 0; const char* err = XrdNetUtils::GetAddrs(host_name, &addrs, nAddrs, XrdNetUtils::allIPv64, XrdNetUtils::NoPortRaw); free(const_cast(host_name)); if (err) { error.Emsg("Config", "hostname is invalid : %s", err); return 1; } if (nAddrs == 0) { error.Emsg("Config", "hostname is invalid"); return 1; } char buffer[64]; int length = addrs[0].Format(buffer, sizeof(buffer), XrdNetAddrInfo::fmtAddr, XrdNetAddrInfo::noPortRaw); delete [] addrs; if (length == 0) { error.Emsg("Config", "hostname is invalid"); return 1; } mManagerIp.assign(buffer, length); // Extract the manager from the config file XrdOucStream Config(&error, getenv("XRDINSTANCE")); // Read in the auth configuration from the xrd.cf.auth file if (!ConfigFN || !*ConfigFN) { NoGo = 1; error.Emsg("Configure", "no configure file"); } else { // Try to open the 
configuration file. if ((cfgFD = open(ConfigFN, O_RDONLY, 0)) < 0) { return error.Emsg("Configure", errno, "open config file fn=", ConfigFN); } Config.Attach(cfgFD); std::string auth_tag = "eosauth."; while ((var = Config.GetMyFirstWord())) { if (!strncmp(var, auth_tag.c_str(), auth_tag.length())) { var += auth_tag.length(); // Get EOS instance to which we dispatch requests. Note that the port is the one // waiting for authentication requests and not the usual one i.e 1094. The presence // of the mgm parameter is mandatory. std::string mgm_instance; std::string option_tag = "mgm"; if (!strncmp(var, option_tag.c_str(), option_tag.length())) { if ((val = Config.GetWord())) { mgm_instance = val; if (mgm_instance.find(":") != std::string::npos) { mBackend = std::make_pair(mgm_instance, (zmq::socket_t*)0); } } else { // This parameter is critical error.Emsg("Configure ", "No EOS mgm instance provided"); NoGo = 1; } } // Get number of sockets in the pool by default 10 option_tag = "collapseport"; if (!strncmp(var, option_tag.c_str(), option_tag.length())) { if (!(val = Config.GetWord())) { error.Emsg("Configure ", "No collapseport specified"); } else { mCollapsePort = atoi(val); } } // Get number of sockets in the pool by default 10 option_tag = "numsockets"; if (!strncmp(var, option_tag.c_str(), option_tag.length())) { if (!(val = Config.GetWord())) { error.Emsg("Configure ", "No number of sockets specified"); } else { mSizePoolSocket = atoi(val); } } // Get log level by default LOG_INFO option_tag = "loglevel"; if (!strncmp(var, option_tag.c_str(), option_tag.length())) { if (!(val = Config.GetWord())) { error.Emsg("Config", "argument for debug level invalid set to ERR."); mLogLevel = LOG_INFO; } else { std::string str_val(val); if (isdigit(str_val[0])) { // The level is given as a number mLogLevel = atoi(val); } else { // The level is given as a string mLogLevel = g_logging.GetPriorityByString(val); } error.Say("=====> eosauth.loglevel: ", 
g_logging.GetPriorityString(mLogLevel), ""); } // Set the new log level g_logging.SetLogPriority(mLogLevel); } } } // Check and connect at least to an MGM master if (!mBackend.first.empty()) { if ((XrdSysThread::Run(&proxy_tid, EosAuthOfs::StartAuthProxyThread, static_cast(this), 0, "Auth Proxy Thread"))) { eos_err("cannot start the authentication proxy thread"); NoGo = 1; } // Create a pool of sockets connected to the master proxy service for (int i = 0; i < mSizePoolSocket; i++) { // Set socket receive timeout to 5 seconds zmq::socket_t* socket = new zmq::socket_t(*mZmqContext, ZMQ_REQ); int timeout_mili = 5000; socket->set(zmq::sockopt::rcvtimeo, timeout_mili); int socket_linger = 0; socket->set(zmq::sockopt::linger, socket_linger); std::string endpoint = "inproc://proxyfrontend"; // Try in a loop to connect to the proxyfrontend as it can take a while for // the proxy thread to do the binding, therefore connect can fail while (1) { try { socket->connect(endpoint.c_str()); } catch (zmq::error_t& err) { eos_warning("dealing with connect exception, retrying ..."); continue; } break; } mPoolSocket.push(socket); } } else { eos_err("No master MGM specified e.g. 
eos.master.cern.ch:15555"); NoGo = 1; } close(cfgFD); } //---------------------------------------------------------------------------- // Build the adler & sha1 checksum of the default keytab file //---------------------------------------------------------------------------- XrdOucString keytabcks = "unaccessible"; std::string keytab_path = "/etc/eos.keytab"; int fd = ::open(keytab_path.c_str(), O_RDONLY); XrdOucString symkey = ""; if (fd >= 0) { char buffer[65535]; char keydigest[SHA_DIGEST_LENGTH + 1]; SHA_CTX sha1; SHA1_Init(&sha1); size_t nread = ::read(fd, buffer, sizeof(buffer)); if (nread > 0) { unsigned int adler; SHA1_Update(&sha1, (const char*) buffer, nread); adler = adler32(0L, Z_NULL, 0); adler = adler32(adler, (const Bytef*) buffer, nread); char sadler[1024]; snprintf(sadler, sizeof(sadler) - 1, "%08x", adler); keytabcks = sadler; } else { eos_err("Failed while readling, error: %s", strerror(errno)); close(fd); return 1; } SHA1_Final((unsigned char*) keydigest, &sha1); eos::common::SymKey::Base64Encode(keydigest, SHA_DIGEST_LENGTH, symkey); close(fd); } else { eos_err("Failed to open keytab file: %s", keytab_path.c_str()); return 1; } eos_notice("AUTH_HOST=%s AUTH_PORT=%ld VERSION=%s RELEASE=%s KEYTABADLER=%s", mManagerIp.c_str(), myPort, VERSION, RELEASE, keytabcks.c_str()); if (!eos::common::gSymKeyStore.SetKey64(symkey.c_str(), 0)) { eos_crit("unable to store the created symmetric key %s", symkey.c_str()); NoGo = 1; } return NoGo; } //------------------------------------------------------------------------------ // Authentication proxy thread startup function //------------------------------------------------------------------------------ void* EosAuthOfs::StartAuthProxyThread(void* pp) { EosAuthOfs* ofs = static_cast(pp); ofs->AuthProxyThread(); return 0; } //------------------------------------------------------------------------------ // Authentication proxt thread which forwards requests form the clients // to the proper MGM intance. 
//------------------------------------------------------------------------------ void EosAuthOfs::AuthProxyThread() { // Bind the client facing socket mFrontend = new zmq::socket_t(*mZmqContext, ZMQ_ROUTER); mFrontend->bind("inproc://proxyfrontend"); // Connect sockets facing the MGM nodes - master and slave std::ostringstream sstr; mBackend = std::make_pair(mBackend.first, new zmq::socket_t(*mZmqContext, ZMQ_DEALER)); sstr << "tcp://" << mBackend.first; mBackend.second->connect(sstr.str().c_str()); OfsEroute.Say("=====> connected to MGM: ", mBackend.first.c_str()); // Set the master to point to the master MGM - no need for lock auto master = mBackend.second; int rc = -1; zmq::message_t msg; // Start the proxy using the first entry int more; int poll_size = 2; zmq::pollitem_t items[3] = { { (void*)* mFrontend, 0, ZMQ_POLLIN, 0}, { (void*)* mBackend.second, 0, ZMQ_POLLIN, 0} }; // Main loop in which the proxy thread accepts request from the clients and // then he forwards them to the current master MGM. The master MGM can change // at any point. 
while (true) { // Wait while there are either requests or replies to process try { #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wdeprecated-declarations" rc = zmq::poll(&items[0], poll_size, -1); #pragma GCC diagnostic pop } catch (zmq::error_t& e) { eos_err("Exception thrown: %s", e.what()); } if (rc < 0) { eos_err("error in poll"); return; } // Process a request if (items[0].revents & ZMQ_POLLIN) { eos_debug("got frontend event"); zmq::recv_flags rf = zmq::recv_flags::none; while (true) { if (!mFrontend->recv(msg, rf).has_value()) { eos_err("error while recv on frontend"); return; } try { more = mFrontend->get(zmq::sockopt::rcvmore); } catch (zmq::error_t& err) { eos_err("exception in getsockopt"); return; } // Send request to the MGM { XrdSysMutexHelper scop_lock(mMutexMaster); zmq::send_flags sf = zmq::send_flags::none; if (more) { sf = zmq::send_flags::sndmore; } if (!master->send(msg, sf)) { eos_err("error while sending to master"); return; } } if (more == 0) { break; } } } // Process a reply from the first MGM if (items[1].revents & ZMQ_POLLIN) { eos_debug("got mBackend event"); zmq::recv_flags rf = zmq::recv_flags::none; while (true) { if (!mBackend.second->recv(msg, rf).has_value()) { eos_err("error while recv on mBackend"); return; } try { more = mBackend.second->get(zmq::sockopt::rcvmore); } catch (zmq::error_t& err) { eos_err("exception in getsockopt"); return; } zmq::send_flags sf = zmq::send_flags::none; if (more) { sf = zmq::send_flags::sndmore; } if (!mFrontend->send(msg, sf)) { eos_err("error while send to frontend(1)"); return; } if (more == 0) { break; } } } } } //------------------------------------------------------------------------------ // Get directory object //------------------------------------------------------------------------------ XrdSfsDirectory* EosAuthOfs::newDir(char* user, int MonID) { return static_cast(new EosAuthOfsDirectory(user, MonID)); } 
//------------------------------------------------------------------------------ // Get file object //------------------------------------------------------------------------------ XrdSfsFile* EosAuthOfs::newFile(char* user, int MonID) { return static_cast(new EosAuthOfsFile(user, MonID)); } void EosAuthOfs::ProcessError(eos::auth::ResponseProto* resp_func, XrdOucErrInfo& error, const char* path) { if (resp_func->has_error()) { if (resp_func->collapse() && path && strlen(path)) { // collpase redirection to remote AUTH MGM std::string url = "root://"; url += resp_func->error().message();; url += ":"; url += std::to_string(mCollapsePort); url += "/"; url += path; error.setErrInfo(~(~(-1) | kXR_collapseRedir), url.c_str()); } else { // one-shot redirection to remote MGM error.setErrInfo(resp_func->error().code(), resp_func->error().message().c_str()); } } } //------------------------------------------------------------------------------ //! Stat method //------------------------------------------------------------------------------ int EosAuthOfs::stat(const char* path, struct stat* buf, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { int retc = SFS_ERROR; eos_debug("stat path=%s", path); // Create request object RequestProto* req_proto = utils::GetStatRequest(RequestProto_OperationType_STAT, path, error, client, opaque); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC FS stat"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; mPoolSocket.wait_pop(socket); if (SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_stat = static_cast(GetResponse(socket)); if (resp_stat) { retc = resp_stat->response(); ProcessError(resp_stat, error, path); // We retrieve the struct stat if response is ok if ((retc == SFS_OK) && resp_stat->has_message()) { buf = static_cast(memcpy((void*)buf, resp_stat->message().c_str(), sizeof(struct stat))); } delete resp_stat; } } 
// Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //-------------------------------------------------------------------------- // Stat function to retrieve mode //-------------------------------------------------------------------------- int EosAuthOfs::stat(const char* path, mode_t& mode, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { int retc = SFS_ERROR; eos_debug("statm path=%s", path); RequestProto* req_proto = utils::GetStatRequest( RequestProto_OperationType_STATM, path, error, client, opaque); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC FS statm"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; mPoolSocket.wait_pop(socket); if (SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_stat = static_cast(GetResponse(socket)); if (resp_stat) { retc = resp_stat->response(); ProcessError(resp_stat, error, path); // We retrieve the open mode if response if ok if ((retc == SFS_OK) && resp_stat->has_message()) { memcpy((void*)&mode, resp_stat->message().c_str(), sizeof(mode_t)); } delete resp_stat; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Execute file system command //------------------------------------------------------------------------------ int EosAuthOfs::fsctl(const int cmd, const char* args, XrdOucErrInfo& error, const XrdSecEntity* client) { int retc = SFS_ERROR; eos_debug("fsctl with cmd=%i, args=%s", cmd, args); int opcode = cmd & SFS_FSCTL_CMD; // For the server configuration query we asnwer with the information of the // authentication XRootD server i.e. don't frw it to the real MGM. 
if (opcode == SFS_FSCTL_LOCATE) { char locResp[4096]; char rType[3], *Resp[] = {rType, locResp}; rType[0] = 'S'; // don't manage writes via global redirection - therefore we mark the files as 'r' rType[1] = 'r'; rType[2] = '\0'; sprintf(locResp, "[::%s]:%d ", (char*) gOFS->mManagerIp.c_str(), gOFS->mPort); error.setErrInfo(strlen(locResp) + 3, (const char**) Resp, 2); return SFS_DATA; } RequestProto* req_proto = utils::GetFsctlRequest(cmd, args, error, client); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC FS fsctl"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; mPoolSocket.wait_pop(socket); if (SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_fsctl1 = static_cast(GetResponse(socket)); if (resp_fsctl1) { retc = resp_fsctl1->response(); // TODO: we can't collapse without a path ... ProcessError(resp_fsctl1, error, 0); delete resp_fsctl1; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Execute file system command !!! FSctl !!! //------------------------------------------------------------------------------ int EosAuthOfs::FSctl(const int cmd, XrdSfsFSctl& args, XrdOucErrInfo& error, const XrdSecEntity* client) { int retc = SFS_ERROR; eos_debug("FSctl with cmd=%i", cmd); RequestProto* req_proto = utils::GetFSctlRequest(cmd, args, error, client); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC FS FSctl"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; mPoolSocket.wait_pop(socket); if (SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_fsctl2 = static_cast(GetResponse(socket)); if (resp_fsctl2) { retc = resp_fsctl2->response(); // TODO: we can't collapse without a path ... 
ProcessError(resp_fsctl2, error, 0); delete resp_fsctl2; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Chmod by client //------------------------------------------------------------------------------ int EosAuthOfs::chmod(const char* path, XrdSfsMode mode, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { int retc = SFS_ERROR; eos_debug("chmod path=%s mode=%o", path, mode); RequestProto* req_proto = utils::GetChmodRequest(path, mode, error, client, opaque); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC FS chmod"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; mPoolSocket.wait_pop(socket); if (SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_chmod = static_cast(GetResponse(socket)); if (resp_chmod) { retc = resp_chmod->response(); ProcessError(resp_chmod, error, path); delete resp_chmod; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Chksum by client //------------------------------------------------------------------------------ int EosAuthOfs::chksum(csFunc func, const char* csName, const char* path, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { int retc = SFS_ERROR; eos_debug("chksum path=%s csName=%s", path, csName); RequestProto* req_proto = utils::GetChksumRequest(func, csName, path, error, client, opaque); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC FS chksum"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; mPoolSocket.wait_pop(socket); if (SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_chksum = 
static_cast(GetResponse(socket)); if (resp_chksum) { retc = resp_chksum->response(); eos_debug("chksum retc=%i", retc); ProcessError(resp_chksum, error, path); delete resp_chksum; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Exists function //------------------------------------------------------------------------------ int EosAuthOfs::exists(const char* path, XrdSfsFileExistence& exists_flag, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { int retc = SFS_ERROR; eos_debug("exists path=%s", path); RequestProto* req_proto = utils::GetExistsRequest(path, error, client, opaque); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC FS exists"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; mPoolSocket.wait_pop(socket); if (SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_exists = static_cast(GetResponse(socket)); if (resp_exists) { retc = resp_exists->response(); eos_debug("exists retc=%i", retc); ProcessError(resp_exists, error, path); if (resp_exists->has_message()) { exists_flag = (XrdSfsFileExistence)atoi(resp_exists->message().c_str()); } delete resp_exists; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Create directory // Note: the mode set here is actually ignored if the directoy is not the top // one. The new directory inherits the mode bits from its parent directory. // This is typical only for EOS since in a normal XRootD server the access bits // specified in the mkdir command are actually applied as expected. 
//------------------------------------------------------------------------------ int EosAuthOfs::mkdir(const char* path, XrdSfsMode mode, // Ignored in EOS if it has a parent dir XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { int retc = SFS_ERROR; eos_debug("mkdir path=%s mode=%o", path, mode); RequestProto* req_proto = utils::GetMkdirRequest(path, mode, error, client, opaque); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC FS mkdir"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; mPoolSocket.wait_pop(socket); if (SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_mkdir = static_cast(GetResponse(socket)); if (resp_mkdir) { retc = resp_mkdir->response(); eos_debug("mkdir retc=%i", retc); ProcessError(resp_mkdir, error, path); delete resp_mkdir; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Remove directory //------------------------------------------------------------------------------ int EosAuthOfs::remdir(const char* path, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { int retc = SFS_ERROR; eos_debug("remdir path=%s", path); RequestProto* req_proto = utils::GetRemdirRequest(path, error, client, opaque); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC FS remdir"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; mPoolSocket.wait_pop(socket); if (SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_remdir = static_cast(GetResponse(socket)); if (resp_remdir) { retc = resp_remdir->response(); eos_debug("remdir retc=%i", retc); ProcessError(resp_remdir, error, path); delete resp_remdir; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return 
retc; } //------------------------------------------------------------------------------ // Remove file //------------------------------------------------------------------------------ int EosAuthOfs::rem(const char* path, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { int retc = SFS_ERROR; eos_debug("rem path=%s", path); RequestProto* req_proto = utils::GetRemRequest(path, error, client, opaque); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC FS rem"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; mPoolSocket.wait_pop(socket); if (SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_rem = static_cast(GetResponse(socket)); if (resp_rem) { retc = resp_rem->response(); eos_debug("rem retc=%i", retc); ProcessError(resp_rem, error, path); delete resp_rem; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Rename file //------------------------------------------------------------------------------ int EosAuthOfs::rename(const char* oldName, const char* newName, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaqueO, const char* opaqueN) { int retc = SFS_ERROR; eos_debug("rename oldname=%s newname=%s", oldName, newName); RequestProto* req_proto = utils::GetRenameRequest(oldName, newName, error, client, opaqueO, opaqueN); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC FS rename"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; mPoolSocket.wait_pop(socket); if (SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_rename = static_cast(GetResponse(socket)); if (resp_rename) { retc = resp_rename->response(); eos_debug("rename retc=%i", retc); ProcessError(resp_rename, error, 0); delete resp_rename; } } 
// Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Prepare request //------------------------------------------------------------------------------ int EosAuthOfs::prepare(XrdSfsPrep& pargs, XrdOucErrInfo& error, const XrdSecEntity* client) { int retc = SFS_ERROR; eos_debug("prepare"); RequestProto* req_proto = utils::GetPrepareRequest(pargs, error, client); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC FS prepare"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; mPoolSocket.wait_pop(socket); if (SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_prepare = static_cast(GetResponse(socket)); if (resp_prepare) { retc = resp_prepare->response(); eos_debug("prepare retc=%i", retc); ProcessError(resp_prepare, error, 0); delete resp_prepare; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Truncate file //------------------------------------------------------------------------------ int EosAuthOfs::truncate(const char* path, XrdSfsFileOffset fileOffset, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { int retc = SFS_ERROR; eos_debug("truncate"); RequestProto* req_proto = utils::GetTruncateRequest(path, fileOffset, error, client, opaque); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC FS truncate"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; mPoolSocket.wait_pop(socket); if (SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_truncate = static_cast(GetResponse(socket)); if (resp_truncate) { retc = resp_truncate->response(); eos_debug("truncate retc=%i", retc); 
ProcessError(resp_truncate, error, path); delete resp_truncate; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // getStats function - not supported by EOS, fake ok response HERE i.e. do not // build and send a request to the real MGM //------------------------------------------------------------------------------ int EosAuthOfs::getStats(char* buff, int blen) { int retc = SFS_OK; eos_debug("getStats"); return retc; } //------------------------------------------------------------------------------ // Send ProtocolBuffer object using ZMQ //------------------------------------------------------------------------------ bool EosAuthOfs::SendProtoBufRequest(zmq::socket_t* socket, google::protobuf::Message* message) { // Send the request bool sent = false; #if GOOGLE_PROTOBUF_VERSION < 3004000 int msg_size = message->ByteSize(); #else int msg_size = message->ByteSizeLong(); #endif zmq::message_t request(msg_size); google::protobuf::io::ArrayOutputStream aos(request.data(), msg_size); if (!message->SerializeToZeroCopyStream(&aos)) { eos_err("failed to serialize message"); return sent; } zmq::send_flags sf = zmq::send_flags::dontwait; auto r = socket->send(request, sf); if (r.has_value()) { sent = true; } if (!sent) { eos_err("unable to send request using zmq"); } return sent; } //------------------------------------------------------------------------------ // Get ProtocolBuffer response object using ZMQ //------------------------------------------------------------------------------ google::protobuf::Message* EosAuthOfs::GetResponse(zmq::socket_t*& socket) { // It makes no sense to wait more than 1 min since the XRootD client will // timeout by default after 60 seconds. 
int num_retries = 12; // 1 min = 12 * 5 sec bool done = false; bool reset_socket = false; zmq::message_t reply; ResponseProto* resp = static_cast(0); try { zmq::recv_flags rf = zmq::recv_flags::none; zmq::recv_result_t rr; do { rr = socket->recv(reply, rf); --num_retries; if (!rr.has_value()) { eos_err("ptr_socket=%p, num_retries=%i failed receive", socket, num_retries); } else { done = true; } } while (!rr.has_value() && (num_retries > 0)); } catch (zmq::error_t& e) { eos_err("socket error: %s", e.what()); reset_socket = true; } // We time out while waiting for a response or a fatal error occurent - // then we throw away the socket and create a new one if ((num_retries <= 0) || reset_socket) { eos_err("discard current socket and create a new one"); delete socket; socket = new zmq::socket_t(*mZmqContext, ZMQ_REQ); int timeout_mili = 5000; socket->set(zmq::sockopt::rcvtimeo, timeout_mili); int socket_linger = 0; socket->set(zmq::sockopt::linger, socket_linger); std::string endpoint = "inproc://proxyfrontend"; // Try in a loop to connect to the proxyfrontend as it can take a while for // the proxy thread to do the binding, therefore connect can fail while (1) { try { socket->connect(endpoint.c_str()); } catch (zmq::error_t& err) { eos_warning("dealing with connect exception, retrying ..."); continue; } break; } } if (done) { std::string resp_str = std::string(static_cast(reply.data()), reply.size()); resp = new ResponseProto(); resp->ParseFromString(resp_str); // If the response is a redirect we redirect to our own port number on the target // - this allows to failover access from one AUTH to another AUTH daemon in an HA MGM setup if (resp->response() == SFS_REDIRECT) { if (resp->has_error()) { std::ostringstream sstr; sstr << resp->error().message(); std::string redirect_host = sstr.str(); // Add redirect collapse resp->set_collapse(true); } else { eos_err("redirect message without error information - change to error"); resp->set_response(SFS_ERROR); } } } else { 
eos_err("socket error/timeout during receive"); } return resp; } EOSAUTHNAMESPACE_END ================================================ FILE: auth_plugin/EosAuthOfs.hh ================================================ // ----------------------------------------------------------------------------- // File: EosAuthOfs.hh // Author: Elvin-Alin Sindrilaru - CERN // ----------------------------------------------------------------------------- /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2013 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ #ifndef __EOSAUTH_OFS_HH__ #define __EOSAUTH_OFS_HH__ #include #include "Namespace.hh" #include "common/ConcurrentQueue.hh" #include #include //! Forward declaration class EosAuthOfsDirectory; class EosAuthOfsFile; namespace eos { namespace auth { class ResponseProto; } } namespace google { namespace protobuf { class Message; } } EOSAUTHNAMESPACE_BEGIN //------------------------------------------------------------------------------ //! Class EosAuthOfs built on top of XrdOfs /*! Decription: The libEosAuthOfs.so is inteded to be used as an OFS library plugin with a vanilla XRootD server. What it does is to connect using ZMQ sockets to onel MGM node. a slave MGM). 
The endpoint is read from the configuration file; by default it will connect to localhost:1094! The EosAuthOfs plugin then tries to replay all the requests it receives from the clients to the master MGM node. It does this by marshalling the request and identity of the client using ProtocolBuffers and sends this request using ZMQ to the configured MGM node. There are several tunable parameters for this configuration (auth + MGMs): AUTH - configuration ==================== - eosauth.mgm - contains the hostname and the port to which ZMQ will connect so that it can forward requests and receive responses. - eosauth.numsockets - once a client wants to send a request, the thread allocated to it in XRootD will require a socket to send the request to the MGM node. Therefore, we set up a pool of sockets from the beginning which can be used to send/receive requests/responses. The default size is 10 sockets. MGM - configuration =================== - mgmofs.auththreads - since we now receive requests using ZMQ, we no longer use the default thread pool from XRootD and we need threads for dealing with the requests. This parameter sets the thread pool size when starting the MGM node. - mgmofs.authport - this is the endpoint where the MGM listens for ZMQ requests from any EosAuthOfs plugins. This port needs to be opened also in the firewall. - mgmofs.localhost true|false - by default the ZMQ endpoint will listen on all interfaces, but often the front-end will run on the same node and for security we want only to have localhost connections. In case of a master <=> slave switch the EosAuthOfs plugin adapts automatically based on the information provided by the slave MGM which should redirect all clients with write requests to the master node. Care should be taken when specifying the two endpoints since the switch is done ONLY IF the redirection HOST matches one of the two endpoints specified in the configuration of the authentication plugin (namely eosauth.instance). 
Once the switch is done all requests be them read or write are sent to the new master MGM node. */ //------------------------------------------------------------------------------ class EosAuthOfs: public XrdOfs, public eos::common::LogId { friend class EosAuthOfsDirectory; friend class EosAuthOfsFile; public: //-------------------------------------------------------------------------- //! Constuctor //-------------------------------------------------------------------------- EosAuthOfs(); //-------------------------------------------------------------------------- //! Destructor //-------------------------------------------------------------------------- virtual ~EosAuthOfs(); //-------------------------------------------------------------------------- //! Configure routine //-------------------------------------------------------------------------- virtual int Configure(XrdSysError& error, XrdOucEnv* envP); //-------------------------------------------------------------------------- //! Get directory object //-------------------------------------------------------------------------- XrdSfsDirectory* newDir(char* user = 0, int MonID = 0); //-------------------------------------------------------------------------- // Get file object //-------------------------------------------------------------------------- XrdSfsFile* newFile(char* user = 0, int MonID = 0); //-------------------------------------------------------------------------- //! Stat function //-------------------------------------------------------------------------- int stat(const char* path, struct stat* buf, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque = 0); //-------------------------------------------------------------------------- //! 
Stat function to retrieve mode //-------------------------------------------------------------------------- int stat(const char* name, mode_t& mode, XrdOucErrInfo& out_error, const XrdSecEntity* client, const char* opaque = 0); //-------------------------------------------------------------------------- //! Execute file system command !!! fsctl !!! //-------------------------------------------------------------------------- int fsctl(const int cmd, const char* args, XrdOucErrInfo& out_error, const XrdSecEntity* client); //-------------------------------------------------------------------------- //! Execute file system command !!! FSctl !!! //-------------------------------------------------------------------------- int FSctl(const int cmd, XrdSfsFSctl& args, XrdOucErrInfo& error, const XrdSecEntity* client = 0); //-------------------------------------------------------------------------- //! Chmod function //-------------------------------------------------------------------------- int chmod(const char* path, XrdSfsMode mopde, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque = 0); //-------------------------------------------------------------------------- //! Chksum function //-------------------------------------------------------------------------- int chksum(csFunc func, const char* csName, const char* path, XrdOucErrInfo& error, const XrdSecEntity* client = 0, const char* opaque = 0); //-------------------------------------------------------------------------- //! Exists function //-------------------------------------------------------------------------- int exists(const char* path, XrdSfsFileExistence& exists_flag, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque = 0); //-------------------------------------------------------------------------- //! 
Create directory //-------------------------------------------------------------------------- int mkdir(const char* dirName, XrdSfsMode Mode, XrdOucErrInfo& out_error, const XrdSecEntity* client, const char* opaque = 0); //-------------------------------------------------------------------------- //! Remove directory //-------------------------------------------------------------------------- int remdir(const char* path, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque = 0); //-------------------------------------------------------------------------- //! Rem file //-------------------------------------------------------------------------- int rem(const char* path, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque = 0); //-------------------------------------------------------------------------- //! Rename file //-------------------------------------------------------------------------- int rename(const char* oldName, const char* newName, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaqueO = 0, const char* opaqueN = 0); //-------------------------------------------------------------------------- //! Prepare request //-------------------------------------------------------------------------- int prepare(XrdSfsPrep& pargs, XrdOucErrInfo& error, const XrdSecEntity* client = 0); //-------------------------------------------------------------------------- //! Truncate file //-------------------------------------------------------------------------- int truncate(const char* path, XrdSfsFileOffset fileOffset, XrdOucErrInfo& error, const XrdSecEntity* client = 0, const char* opaque = 0); //-------------------------------------------------------------------------- //! getStats function - fake an ok response HERE i.e. do not build and sent //! 
a request to the real MGM //-------------------------------------------------------------------------- int getStats(char* buff, int blen); //-------------------------------------------------------------------------- //! Process a proto error response and configure //! a collpasing redirection if requested/possible //-------------------------------------------------------------------------- void ProcessError(eos::auth::ResponseProto* resp_func, XrdOucErrInfo& error, const char* path); private: pthread_t proxy_tid; ///< id of the proxy thread zmq::context_t* mZmqContext; ///< ZMQ context zmq::socket_t* mFrontend; ///< proxy socket facing the clients XrdSysMutex mMutexMaster; int mSizePoolSocket; ///< maximum size of the client socket pool eos::common::ConcurrentQueue mPoolSocket; ///< ZMQ client socket pool ///! MGM endpoints to which requests can be dispatched and the corresponding sockets std::pair mBackend; std::string mManagerIp; ///< auth ip address int mPort; ///< port on which the current auth server runs int mCollapsePort; ///< port to which a redirect gets collapsed on int mLogLevel; ///< log level value 0 -7 (LOG_EMERG - LOG_DEBUG) //-------------------------------------------------------------------------- //! Authentication proxy thread which forwards requests form the clients //! to the proper MGM intance. //-------------------------------------------------------------------------- void AuthProxyThread(); //-------------------------------------------------------------------------- //! Authentication proxy thread startup function //-------------------------------------------------------------------------- static void* StartAuthProxyThread(void* pp); //-------------------------------------------------------------------------- //! Send ProtocolBuffer object using ZMQ //! //! @param socket ZMQ socket object //! @param object to be sent over the wire //! //! @return true if object sent successfully, otherwise false //! 
//-------------------------------------------------------------------------- bool SendProtoBufRequest(zmq::socket_t* socket, google::protobuf::Message* message); //-------------------------------------------------------------------------- //! Get ProtocolBuffer reply object using ZMQ //! //! @param socket ZMQ socket object //! //! @return pointer to received object, the user has the responsibility to //! delete the obtained object //! //-------------------------------------------------------------------------- google::protobuf::Message* GetResponse(zmq::socket_t*& socket); //-------------------------------------------------------------------------- }; //------------------------------------------------------------------------------ //! Global OFS object //------------------------------------------------------------------------------ extern EosAuthOfs* gOFS; EOSAUTHNAMESPACE_END #endif //__EOSAUTH_OFS_HH__ ================================================ FILE: auth_plugin/EosAuthOfsDirectory.cc ================================================ //------------------------------------------------------------------------------ // File: EosAuthOfsDirectory.cc // Author: Elvin-Alin Sindrilau CERN //------------------------------------------------------------------------------ /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2013 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. 
* * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ /*----------------------------------------------------------------------------*/ #include "EosAuthOfsDirectory.hh" #include "EosAuthOfs.hh" #include "ProtoUtils.hh" /*----------------------------------------------------------------------------*/ #include /*----------------------------------------------------------------------------*/ EOSAUTHNAMESPACE_BEGIN //------------------------------------------------------------------------------ // Constructor //------------------------------------------------------------------------------ EosAuthOfsDirectory::EosAuthOfsDirectory(char* user, int MonID): XrdSfsDirectory(user, MonID), LogId(), mName("") { // empty } //------------------------------------------------------------------------------ // Destructor //------------------------------------------------------------------------------ EosAuthOfsDirectory::~EosAuthOfsDirectory() { // emtpy } //------------------------------------------------------------------------------ // Open a directory //------------------------------------------------------------------------------ int EosAuthOfsDirectory::open(const char* name, const XrdSecClientName* client, const char* opaque) { int retc = SFS_ERROR; eos_debug("dir open name=%s", name); mName = name; // save only for debugging purposes std::ostringstream sstr; // Add the current machine's IP to the uuid in order to avoid collisions in case // we have multiple auth plugins connecting to the same MGM node sstr << gOFS->mManagerIp << ":" << this; RequestProto* req_proto = utils::GetDirOpenRequest(sstr.str(), name, client, opaque, error.getErrUser(), error.getErrMid()); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC dir open"); delete req_proto; return retc; } // Get a socket object from the pool 
zmq::socket_t* socket; gOFS->mPoolSocket.wait_pop(socket); if (gOFS->SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_open = static_cast(gOFS->GetResponse(socket)); if (resp_open) { retc = resp_open->response(); eos_debug("got response for dir open request"); delete resp_open; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Get entry of an open directory //------------------------------------------------------------------------------ const char* EosAuthOfsDirectory::nextEntry() { int retc = SFS_ERROR; eos_debug("dir read name=%s", mName.c_str()); std::ostringstream sstr; sstr << gOFS->mManagerIp << ":" << this; RequestProto* req_proto = utils::GetDirReadRequest(sstr.str()); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC dir nextEntry"); delete req_proto; return static_cast(0) ; } // Get a socket object from the pool zmq::socket_t* socket; gOFS->mPoolSocket.wait_pop(socket); if (gOFS->SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_read = static_cast(gOFS->GetResponse(socket)); if (resp_read) { retc = resp_read->response(); eos_debug("got response for dir read request"); if (retc == SFS_OK) { eos_debug("next entry is: %s", resp_read->message().c_str()); mNextEntry = resp_read->message(); } else { eos_debug("no more entries or error on server side"); } delete resp_read; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return (retc ? 
static_cast(0) : mNextEntry.c_str()); } //------------------------------------------------------------------------------ // Close an open directory //------------------------------------------------------------------------------ int EosAuthOfsDirectory::close() { int retc = SFS_ERROR; eos_debug("dir close name=%s", mName.c_str()); std::ostringstream sstr; sstr << gOFS->mManagerIp << ":" << this; RequestProto* req_proto = utils::GetDirCloseRequest(sstr.str()); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error dir close"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; gOFS->mPoolSocket.wait_pop(socket); if (gOFS->SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_close = static_cast(gOFS->GetResponse(socket)); if (resp_close) { retc = resp_close->response(); eos_debug("got response dir close request"); delete resp_close; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Get name of an open directory //------------------------------------------------------------------------------ const char* EosAuthOfsDirectory::FName() { int retc = SFS_ERROR; eos_debug("dir fname"); std::ostringstream sstr; sstr << gOFS->mManagerIp << ":" << this; RequestProto* req_proto = utils::GetDirFnameRequest(sstr.str()); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC dir fname"); delete req_proto; return static_cast(0) ; } // Get a socket object from the pool zmq::socket_t* socket; gOFS->mPoolSocket.wait_pop(socket); if (gOFS->SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_fname = static_cast(gOFS->GetResponse(socket)); if (resp_fname) { retc = resp_fname->response(); eos_debug("got response for dirfname request"); if (retc == SFS_OK) { eos_debug("dir fname is: %s", resp_fname->message().c_str()); mName = 
resp_fname->message(); } else { eos_debug("dir fname not found or error on server side"); } delete resp_fname; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return (retc ? static_cast(0) : mName.c_str()); } EOSAUTHNAMESPACE_END ================================================ FILE: auth_plugin/EosAuthOfsDirectory.hh ================================================ //------------------------------------------------------------------------------ // File: EosAuthOfsDirectory.hh // Author: Elvin-Alin Sindrilau CERN //------------------------------------------------------------------------------ /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2013 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. 
If not, see .* ************************************************************************/ #ifndef __EOSAUTH_OFSDIRECTORY__HH__ #define __EOSAUTH_OFSDIRECTORY__HH__ /*----------------------------------------------------------------------------*/ #include "common/Logging.hh" #include "common/Mapping.hh" #include "Namespace.hh" /*----------------------------------------------------------------------------*/ #include #include #include /*----------------------------------------------------------------------------*/ EOSAUTHNAMESPACE_BEGIN //------------------------------------------------------------------------------ //! Class implementing OFS directories //------------------------------------------------------------------------------ class EosAuthOfsDirectory: public XrdSfsDirectory, public eos::common::LogId { public: //-------------------------------------------------------------------------- //! Constructor //-------------------------------------------------------------------------- EosAuthOfsDirectory(char* user = 0, int MonID = 0); //-------------------------------------------------------------------------- //! Destructor //-------------------------------------------------------------------------- ~EosAuthOfsDirectory(); //-------------------------------------------------------------------------- //! Open a directory //-------------------------------------------------------------------------- int open(const char* name, const XrdSecClientName* client = 0, const char* opaque = 0); //-------------------------------------------------------------------------- //! Get entry of an open directory //-------------------------------------------------------------------------- const char* nextEntry(); //-------------------------------------------------------------------------- //! Close directory //-------------------------------------------------------------------------- int close(); //-------------------------------------------------------------------------- //! 
Get name of an open directory //-------------------------------------------------------------------------- const char* FName(); private: std::string mName; ///< keep directory name just for debugging purposes std::string mNextEntry; ///< next entry value in directory }; EOSAUTHNAMESPACE_END #endif // __EOSAUTH_OFSDIRECTORY_HH__ ================================================ FILE: auth_plugin/EosAuthOfsFile.cc ================================================ //------------------------------------------------------------------------------ // File: EosAuthOfsFile.cc // Author: Elvin-Alin Sindrilau CERN //------------------------------------------------------------------------------ /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2013 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. 
If not, see .* ************************************************************************/ /*----------------------------------------------------------------------------*/ #include "EosAuthOfsFile.hh" #include "EosAuthOfs.hh" #include "ProtoUtils.hh" /*----------------------------------------------------------------------------*/ #include /*----------------------------------------------------------------------------*/ EOSAUTHNAMESPACE_BEGIN //------------------------------------------------------------------------------ // Constructor //------------------------------------------------------------------------------ EosAuthOfsFile::EosAuthOfsFile(char* user, int MonID): XrdSfsFile(user, MonID), eos::common::LogId(), mName("") { // emtpy } //------------------------------------------------------------------------------ // Destructor //------------------------------------------------------------------------------ EosAuthOfsFile::~EosAuthOfsFile() { // empty } //------------------------------------------------------------------------------ // Open a file //------------------------------------------------------------------------------ int EosAuthOfsFile::open(const char* fileName, XrdSfsFileOpenMode openMode, mode_t createMode, const XrdSecEntity* client, const char* opaque) { int retc = SFS_ERROR; eos_debug("file open name=%s opaque=%s", fileName, opaque); mName = fileName; // Save file pointer value which is used as a key on the MGM instance std::ostringstream sstr; // Add the current machine's IP to the uuid in order to avoid collisions in case // we have multiple auth plugins connecting to the same MGM node sstr << gOFS->mManagerIp << ":" << this; RequestProto* req_proto = utils::GetFileOpenRequest(sstr.str(), fileName, openMode, createMode, client, opaque, error.getErrUser(), error.getErrMid()); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC file open"); delete req_proto; return retc; } // Get a socket object from the pool 
zmq::socket_t* socket; gOFS->mPoolSocket.wait_pop(socket); if (gOFS->SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_open = static_cast(gOFS->GetResponse( socket)); if (resp_open) { retc = resp_open->response(); eos_debug("got response for file open request: %i", retc); if (resp_open->has_error()) { error.setErrInfo(resp_open->error().code(), resp_open->error().message().c_str()); } delete resp_open; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Read function //------------------------------------------------------------------------------ XrdSfsXferSize EosAuthOfsFile::read(XrdSfsFileOffset offset, char* buffer, XrdSfsXferSize length) { int retc = 0; // this means read 0 bytes and NOT SFS_OK :) eos_debug("read off=%li len=%i", (long long)offset, (int)length); std::ostringstream sstr; sstr << gOFS->mManagerIp << ":" << this; eos_debug("fptr=%s, off=%li, len=%i", sstr.str().c_str(), offset, length); RequestProto* req_proto = utils::GetFileReadRequest(sstr.str(), offset, length); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC file read"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; gOFS->mPoolSocket.wait_pop(socket); if (gOFS->SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_fread = static_cast(gOFS->GetResponse( socket)); if (resp_fread) { retc = resp_fread->response(); if (retc && resp_fread->has_message()) { buffer = static_cast(memcpy((void*)buffer, resp_fread->message().c_str(), resp_fread->message().length())); } delete resp_fread; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Write function 
//------------------------------------------------------------------------------ XrdSfsXferSize EosAuthOfsFile::write(XrdSfsFileOffset offset, const char* buffer, XrdSfsXferSize length) { int retc = 0; // this means written 0 bytes and NOT SFS_OK :) eos_debug("write off=%ll len=%i", offset, length); std::ostringstream sstr; sstr << gOFS->mManagerIp << ":" << this; eos_debug("fptr=%s, off=%li, len=%i", sstr.str().c_str(), offset, length); RequestProto* req_proto = utils::GetFileWriteRequest(sstr.str(), offset, buffer, length); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC file write"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; gOFS->mPoolSocket.wait_pop(socket); if (gOFS->SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_fwrite = static_cast(gOFS->GetResponse( socket)); if (resp_fwrite) { retc = resp_fwrite->response(); eos_debug("got response for file write request"); delete resp_fwrite; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ // Get name of file //------------------------------------------------------------------------------ const char* EosAuthOfsFile::FName() { int retc = SFS_ERROR; eos_debug("file fname"); std::ostringstream sstr; sstr << gOFS->mManagerIp << ":" << this; eos_debug("file pointer: %s", sstr.str().c_str()); RequestProto* req_proto = utils::GetFileFnameRequest(sstr.str()); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC file name"); delete req_proto; return ""; } // Get a socket object from the pool zmq::socket_t* socket; gOFS->mPoolSocket.wait_pop(socket); if (gOFS->SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_fname = static_cast(gOFS->GetResponse( socket)); if (resp_fname) { retc = resp_fname->response(); eos_debug("got response for 
filefname request"); if (retc == SFS_OK) { eos_debug("file fname is: %s", resp_fname->message().c_str()); mName = resp_fname->message(); } else { eos_debug("file fname not found or error on server side"); } delete resp_fname; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return (retc ? static_cast(0) : (mName.empty() ? "" : mName.c_str())); } //------------------------------------------------------------------------------ // Stat function //------------------------------------------------------------------------------ int EosAuthOfsFile::stat(struct stat* buf) { int retc = SFS_ERROR; eos_debug("stat file name=%s", mName.c_str()); std::ostringstream sstr; sstr << gOFS->mManagerIp << ":" << this; eos_debug("file pointer: %s", sstr.str().c_str()); RequestProto* req_proto = utils::GetFileStatRequest(sstr.str()); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC file stat"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; gOFS->mPoolSocket.wait_pop(socket); if (gOFS->SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_fstat = static_cast(gOFS->GetResponse( socket)); if (resp_fstat) { retc = resp_fstat->response(); buf = static_cast(memcpy((void*)buf, resp_fstat->message().c_str(), sizeof(struct stat))); eos_debug("got response for fstat request: %i", retc); delete resp_fstat; } } else { eos_err("file stat - unable to send request"); memset(buf, 0, sizeof(struct stat)); } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ //! 
Close file //------------------------------------------------------------------------------ int EosAuthOfsFile::close() { int retc = SFS_ERROR; eos_debug("close"); std::ostringstream sstr; sstr << gOFS->mManagerIp << ":" << this; eos_debug("file pointer: %s", sstr.str().c_str()); RequestProto* req_proto = utils::GetFileCloseRequest(sstr.str()); // Compute HMAC for request object if (!utils::ComputeHMAC(req_proto)) { eos_err("error HMAC file close"); delete req_proto; return retc; } // Get a socket object from the pool zmq::socket_t* socket; gOFS->mPoolSocket.wait_pop(socket); if (gOFS->SendProtoBufRequest(socket, req_proto)) { ResponseProto* resp_close = static_cast(gOFS->GetResponse( socket)); if (resp_close) { retc = resp_close->response(); eos_debug("got response for file close request: %i", retc); delete resp_close; } } // Release socket and free memory gOFS->mPoolSocket.push(socket); delete req_proto; return retc; } //------------------------------------------------------------------------------ //!!!!!!!!! THE FOLLOWING OPERATIONS ARE NOT SUPPORTED !!!!!!!!! 
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // fctl fakes ok (not supported) //------------------------------------------------------------------------------ int EosAuthOfsFile::fctl(int, const char*, XrdOucErrInfo&) { return 0; } //------------------------------------------------------------------------------ // Return mmap address (not supported) //------------------------------------------------------------------------------ int EosAuthOfsFile::getMmap(void** Addr, off_t& Size) { if (Addr) { Addr = 0; } Size = 0; return SFS_OK; } //------------------------------------------------------------------------------ // File pre-read fakes ok (not supported) //------------------------------------------------------------------------------ int EosAuthOfsFile::read(XrdSfsFileOffset fileOffset, XrdSfsXferSize preread_sz) { return SFS_OK; } //------------------------------------------------------------------------------ // File read in async mode (not supported) //------------------------------------------------------------------------------ int EosAuthOfsFile::read(XrdSfsAio* aioparm) { static const char* epname = "read"; return Emsg(epname, error, EOPNOTSUPP, "read", mName.c_str()); } //------------------------------------------------------------------------------ // File write in async mode (not supported) //------------------------------------------------------------------------------ int EosAuthOfsFile::write(XrdSfsAio* aiop) { static const char* epname = "write"; return Emsg(epname, error, EOPNOTSUPP, "write", mName.c_str()); } //------------------------------------------------------------------------------ // File sync (not supported) //------------------------------------------------------------------------------ int EosAuthOfsFile::sync() { static const char* epname = "sync"; return Emsg(epname, error, EOPNOTSUPP, "sync", mName.c_str()); } 
//------------------------------------------------------------------------------ // File async sync (not supported) //------------------------------------------------------------------------------ int EosAuthOfsFile::sync(XrdSfsAio* aiop) { static const char* epname = "sync"; return Emsg(epname, error, EOPNOTSUPP, "sync", mName.c_str()); } //------------------------------------------------------------------------------ // File truncate (not supported) //------------------------------------------------------------------------------ int EosAuthOfsFile::truncate(XrdSfsFileOffset flen) { static const char* epname = "trunc"; return Emsg(epname, error, EOPNOTSUPP, "truncate", mName.c_str()); } //------------------------------------------------------------------------------ // Get checksum info (returns nothing - not supported) //------------------------------------------------------------------------------ int EosAuthOfsFile::getCXinfo(char cxtype[4], int& cxrsz) { return cxrsz = 0; } //------------------------------------------------------------------------------ // Create error message //------------------------------------------------------------------------------ int EosAuthOfsFile::Emsg(const char* pfx, XrdOucErrInfo& einfo, int ecode, const char* op, const char* target) { char* etext, buffer[4096], unkbuff[64]; // Get the reason for the error if (ecode < 0) { ecode = -ecode; } if (!(etext = strerror(ecode))) { sprintf(unkbuff, "reason unknown (%d)", ecode); etext = unkbuff; } // Format the error message snprintf(buffer, sizeof(buffer), "Unable to %s %s; %s", op, target, etext); eos_err("Unable to %s %s; %s", op, target, etext); // Place the error message in the error object and return einfo.setErrInfo(ecode, buffer); return SFS_ERROR; } EOSAUTHNAMESPACE_END ================================================ FILE: auth_plugin/EosAuthOfsFile.hh ================================================ 
//------------------------------------------------------------------------------ // File: EosAuthOfsFile.hh // Author: Elvin-Alin Sindrilau CERN //------------------------------------------------------------------------------ /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2013 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ #ifndef __EOSAUTH_OFSFILE__HH__ #define __EOSAUTH_OFSFILE__HH__ /*----------------------------------------------------------------------------*/ #include "common/Logging.hh" #include "common/Mapping.hh" #include "Namespace.hh" /*----------------------------------------------------------------------------*/ #include #include /*----------------------------------------------------------------------------*/ EOSAUTHNAMESPACE_BEGIN //------------------------------------------------------------------------------ //! Class implementing OFS files //------------------------------------------------------------------------------ class EosAuthOfsFile: public XrdSfsFile, public eos::common::LogId { public: //-------------------------------------------------------------------------- //! 
Constructor //-------------------------------------------------------------------------- EosAuthOfsFile(char* user = 0, int MonID = 0); //-------------------------------------------------------------------------- //! Destructor //-------------------------------------------------------------------------- ~EosAuthOfsFile(); //-------------------------------------------------------------------------- //! Open a file //-------------------------------------------------------------------------- int open(const char* fileName, XrdSfsFileOpenMode openMode, mode_t createMode, const XrdSecEntity* client, const char* opaque = 0); //-------------------------------------------------------------------------- //! Read function //-------------------------------------------------------------------------- virtual XrdSfsXferSize read(XrdSfsFileOffset offset, char* buffer, XrdSfsXferSize length); //-------------------------------------------------------------------------- //! Write function //-------------------------------------------------------------------------- virtual XrdSfsXferSize write(XrdSfsFileOffset offset, const char* buffer, XrdSfsXferSize length); //-------------------------------------------------------------------------- //! Stat function //-------------------------------------------------------------------------- virtual int stat(struct stat* buf); //-------------------------------------------------------------------------- //! Close file //-------------------------------------------------------------------------- int close(); //-------------------------------------------------------------------------- //! Get name of file //-------------------------------------------------------------------------- const char* FName(); //------------------------------------------------------------------------------ //!!!!!!!!! THE FOLLOWING OPERATIONS ARE NOT SUPPORTED !!!!!!!!! 
//------------------------------------------------------------------------------ //-------------------------------------------------------------------------- //! fctl fakes ok (not supported) //-------------------------------------------------------------------------- int fctl(int, const char*, XrdOucErrInfo&); //-------------------------------------------------------------------------- //! Return mmap address (not supported) //-------------------------------------------------------------------------- int getMmap(void** Addr, off_t& Size); //-------------------------------------------------------------------------- //! File pre-read fakes ok (not supported) //-------------------------------------------------------------------------- int read(XrdSfsFileOffset fileOffset, XrdSfsXferSize preread_sz); //-------------------------------------------------------------------------- //! File read in async mode (not supported) //-------------------------------------------------------------------------- int read(XrdSfsAio* aioparm); //-------------------------------------------------------------------------- //! File write in async mode (not supported) //-------------------------------------------------------------------------- int write(XrdSfsAio* aiop); //-------------------------------------------------------------------------- //! File sync (not supported) //-------------------------------------------------------------------------- int sync(); //-------------------------------------------------------------------------- //! File async sync (not supported) //-------------------------------------------------------------------------- int sync(XrdSfsAio* aiop); //-------------------------------------------------------------------------- //! File truncate (not supported) //-------------------------------------------------------------------------- int truncate(XrdSfsFileOffset flen); //-------------------------------------------------------------------------- //! 
get checksum info (returns nothing - not supported) //-------------------------------------------------------------------------- int getCXinfo(char cxtype[4], int& cxrsz); private: std::string mName; ///< file name //-------------------------------------------------------------------------- //! Create an error message for a file object //! //! @param pfx message prefix value //! @param einfo error text/code object //! @param ecode error code //! @param op name of the operation performed //! @param target target of the operation e.g. file name etc. //! //! @return SFS_ERROR in all cases //! //! This routines prints also an error message into the EOS log. //! //-------------------------------------------------------------------------- int Emsg(const char* pfx, XrdOucErrInfo& einfo, int ecode, const char* op, const char* target); }; EOSAUTHNAMESPACE_END #endif // __EOSAUTH_OFSFILE_HH__ ================================================ FILE: auth_plugin/Namespace.hh ================================================ //------------------------------------------------------------------------------ // File: Namespace.hh // Author: Elvin-Alin Sindrialru CERN //------------------------------------------------------------------------------ /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2013 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. 
* * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ #ifndef __EOSAUTH_NAMESPACE_HH__ #define __EOSAUTH_NAMESPACE_HH__ #define EOSAUTHNAMESPACE_BEGIN namespace eos { namespace auth { #define EOSAUTHNAMESPACE_END }} #endif ================================================ FILE: auth_plugin/ProtoUtils.cc ================================================ //------------------------------------------------------------------------------ // File: ProtoUtils.cc // Author: Elvin-Alin Sindrilaru CERN //------------------------------------------------------------------------------ /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2013 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. 
If not, see .* ************************************************************************/ #include "ProtoUtils.hh" #include "common/Logging.hh" #include "common/SymKeys.hh" #include #include #include #include #include #include #include EOSAUTHNAMESPACE_BEGIN //------------------------------------------------------------------------------ // Convert XrdSecEntity object to ProtocolBuffers representation //------------------------------------------------------------------------------ void utils::ConvertToProtoBuf(const XrdSecEntity* obj, XrdSecEntityProto*& proto) { proto->set_prot(obj->prot); if (obj->name) { proto->set_name(obj->name); } else { proto->set_name(""); } if (obj->host) { proto->set_host(obj->host); } else { proto->set_host(""); } if (obj->vorg) { proto->set_vorg(obj->vorg); } else { proto->set_vorg(""); } if (obj->role) { proto->set_role(obj->role); } else { proto->set_role(""); } if (obj->grps) { proto->set_grps(obj->grps); } else { proto->set_grps(""); } if (obj->endorsements) { proto->set_endorsements(obj->endorsements); } else { proto->set_endorsements(""); } if (obj->creds) { proto->set_creds(obj->creds); } else { proto->set_creds(""); } proto->set_credslen(obj->credslen); if (obj->moninfo) { proto->set_moninfo(obj->moninfo); } else { proto->set_moninfo(""); } if (obj->tident) { proto->set_tident(obj->tident); } else { proto->set_tident(""); } } //------------------------------------------------------------------------------ // Convert XrdOucErrInfo object to ProtocolBuffers representation //------------------------------------------------------------------------------ void utils::ConvertToProtoBuf(XrdOucErrInfo* obj, XrdOucErrInfoProto*& proto) { proto->set_user(obj->getErrUser()); proto->set_code(obj->getErrInfo()); proto->set_message(obj->getErrText()); } //------------------------------------------------------------------------------ // Convert XrSfsFSctl object to ProtocolBuffers representation 
//------------------------------------------------------------------------------ void utils::ConvertToProtoBuf(const XrdSfsFSctl* obj, XrdSfsFSctlProto*& proto) { if (obj->Arg1) { proto->set_arg1(obj->Arg1); } if (obj->Arg2) { proto->set_arg2(obj->Arg2); } proto->set_arg1len(obj->Arg1Len); proto->set_arg2len(obj->Arg2Len); } //------------------------------------------------------------------------------ // Convert XrSfsPrep object to ProtocolBuffers representation //------------------------------------------------------------------------------ void utils::ConvertToProtoBuf(const XrdSfsPrep* obj, XrdSfsPrepProto*& proto) { proto->set_reqid(obj->reqid ? obj->reqid : ""); proto->set_notify(obj->notify ? obj->notify : ""); proto->set_opts(obj->opts); XrdOucTList* next_path = obj->paths; XrdOucTList* next_oinfo = obj->oinfo; while (next_path && next_oinfo) { if (next_path->text && next_oinfo->text) { proto->add_paths(next_path->text); next_path = next_path->next; proto->add_oinfo(next_oinfo->text); next_oinfo = next_oinfo->next; } } } //------------------------------------------------------------------------------ // Get XrdSecEntity object from protocol buffer object //------------------------------------------------------------------------------ XrdSecEntity* utils::GetXrdSecEntity(const XrdSecEntityProto& proto_obj) { XrdSecEntity* obj = new XrdSecEntity(); strncpy(obj->prot, proto_obj.prot().c_str(), XrdSecPROTOIDSIZE - 1); obj->prot[XrdSecPROTOIDSIZE - 1] = '\0'; obj->name = strdup(proto_obj.name().c_str()); obj->host = strdup(proto_obj.host().c_str()); obj->vorg = strdup(proto_obj.vorg().c_str()); obj->role = strdup(proto_obj.role().c_str()); obj->grps = strdup(proto_obj.grps().c_str()); obj->endorsements = strdup(proto_obj.endorsements().c_str()); obj->creds = strdup(proto_obj.creds().c_str()); obj->credslen = proto_obj.credslen(); obj->moninfo = strdup(proto_obj.moninfo().c_str()); obj->tident = strdup(proto_obj.tident().c_str()); return obj; } 
//------------------------------------------------------------------------------ // Delete XrdSecEntity object //------------------------------------------------------------------------------ void utils::DeleteXrdSecEntity(XrdSecEntity*& obj) { free(obj->name); free(obj->host); free(obj->vorg); free(obj->role); free(obj->grps); free(obj->endorsements); free(obj->creds); free(obj->moninfo); free(const_cast(obj->tident)); delete obj; obj = 0; } //------------------------------------------------------------------------------ // Get XrdSfsPrep object from protocol buffer object //------------------------------------------------------------------------------ XrdSfsPrep* utils::GetXrdSfsPrep(const eos::auth::XrdSfsPrepProto& proto_obj) { XrdSfsPrep* obj = new XrdSfsPrep(); obj->reqid = ((proto_obj.reqid() == "") ? 0 : strdup( proto_obj.reqid().c_str())); obj->notify = ((proto_obj.notify() == "") ? 0 : strdup( proto_obj.notify().c_str())); obj->opts = proto_obj.opts(); obj->paths = obj->oinfo = 0; XrdOucTList*& next_paths = obj->paths; XrdOucTList*& next_oinfo = obj->oinfo; // The number of paths and oinfo should match if (proto_obj.paths_size() != proto_obj.oinfo_size()) { return obj; } XrdOucTList* previousPath = obj->paths; XrdOucTList* previousOinfo = obj->oinfo; for (int i = 0; i < proto_obj.paths_size(); i++) { auto currentPath = new XrdOucTList(proto_obj.paths(i).c_str()); if (next_paths) { previousPath->next = currentPath; } else { next_paths = currentPath; } previousPath = currentPath; currentPath = 0; auto currentOinfo = new XrdOucTList(proto_obj.oinfo(i).c_str()); if (next_oinfo) { previousOinfo->next = currentOinfo; } else { next_oinfo = currentOinfo; } previousOinfo = currentOinfo; currentOinfo = 0; } return obj; } //------------------------------------------------------------------------------ // Delete DeleteXrdSfsPrep object //------------------------------------------------------------------------------ void utils::DeleteXrdSfsPrep(XrdSfsPrep*& obj) { if 
(obj->reqid) { free(obj->reqid); } if (obj->notify != nullptr) { free(obj->notify); } XrdOucTList* currentPath = obj->paths; while (currentPath != nullptr) { XrdOucTList* nextPath = currentPath->next; delete currentPath; currentPath = nextPath; } XrdOucTList* currentOinfo = obj->oinfo; while (currentOinfo != nullptr) { XrdOucTList* nextOinfo = currentOinfo->next; delete currentOinfo; currentOinfo = nextOinfo; } delete obj; } //------------------------------------------------------------------------------ // Get XrdOucErrInfo object from protocol buffer object //------------------------------------------------------------------------------ XrdOucErrInfo* utils::GetXrdOucErrInfo(const eos::auth::XrdOucErrInfoProto& proto_obj) { XrdOucErrInfo* obj = new XrdOucErrInfo(proto_obj.user().c_str()); obj->setErrInfo(proto_obj.code(), proto_obj.message().c_str()); return obj; } //------------------------------------------------------------------------------ // Get XrdSfsFSctl object from protocol buffer object //------------------------------------------------------------------------------ XrdSfsFSctl* utils::GetXrdSfsFSctl(const eos::auth::XrdSfsFSctlProto& proto_obj) { XrdSfsFSctl* obj = new XrdSfsFSctl(); obj->Arg1 = static_cast(0); obj->Arg2 = static_cast(0); obj->Arg1Len = proto_obj.arg1len(); obj->Arg2Len = proto_obj.arg2len(); if (proto_obj.has_arg1()) { obj->Arg1 = const_cast(strdup(proto_obj.arg1().c_str())); } if (proto_obj.has_arg2()) { obj->Arg2 = const_cast(strdup(proto_obj.arg2().c_str())); } return obj; } //------------------------------------------------------------------------------ // Delete XrdSfsFSctl object //------------------------------------------------------------------------------ void utils::DeleteXrdSfsFSctl(XrdSfsFSctl*& obj) { free((void*)obj->Arg1); free((void*)obj->Arg2); delete obj; obj = 0; } //------------------------------------------------------------------------------ // Compute HMAC value of the RequestProto object and append it to the 
// object using the required field hmac //------------------------------------------------------------------------------ bool utils::ComputeHMAC(RequestProto*& req) { std::string smsg; req->set_hmac(""); // set it temporarily, we update it later if (!req->SerializeToString(&smsg)) { eos_static_err("unable to serialize message to string for HMAC computation"); return false; } std::string hmac = eos::common::SymKey::HmacSha1(smsg); XrdOucString base64hmac; bool do_encoding = eos::common::SymKey::Base64Encode((char*)hmac.c_str(), hmac.length(), base64hmac); if (!do_encoding) { eos_static_err("unable to do base64encoding on HMAC"); return do_encoding; } if (EOS_LOGS_DEBUG) { std::string json_out; (void) google::protobuf::util::MessageToJsonString(*req, &json_out); eos_static_debug("request=\"%s\" hmac=\"%s\" hmac_size=%i", json_out.c_str(), base64hmac.c_str(), base64hmac.length()); } // Update the HMAC value req->set_hmac(base64hmac.c_str()); return true; } //------------------------------------------------------------------------------ // Create StatProto object //------------------------------------------------------------------------------ RequestProto* utils::GetStatRequest(RequestProto_OperationType type, const char* path, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::StatProto* stat_proto = req_proto->mutable_stat(); eos::auth::XrdOucErrInfoProto* xoei_proto = stat_proto->mutable_error(); eos::auth::XrdSecEntityProto* xse_proto = stat_proto->mutable_client(); stat_proto->set_path(path); ConvertToProtoBuf(&error, xoei_proto); ConvertToProtoBuf(client, xse_proto); if (opaque) { stat_proto->set_opaque(opaque); } // This can either be a stat to get a struct stat or just to retrieve the // mode of the file/directory req_proto->set_type(type); return req_proto; } //------------------------------------------------------------------------------ // Create fsctl request 
ProtocolBuffer object //------------------------------------------------------------------------------ RequestProto* utils::GetFsctlRequest(const int cmd, const char* args, XrdOucErrInfo& error, const XrdSecEntity* client) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::FsctlProto* fsctl_proto = req_proto->mutable_fsctl1(); eos::auth::XrdOucErrInfoProto* xoei_proto = fsctl_proto->mutable_error(); eos::auth::XrdSecEntityProto* xse_proto = fsctl_proto->mutable_client(); fsctl_proto->set_cmd(cmd); fsctl_proto->set_args(args); ConvertToProtoBuf(&error, xoei_proto); ConvertToProtoBuf(client, xse_proto); req_proto->set_type(RequestProto_OperationType_FSCTL1); return req_proto; } //------------------------------------------------------------------------------ // Create FSctl request ProtocolBuffer object //------------------------------------------------------------------------------ RequestProto* utils::GetFSctlRequest(const int cmd, XrdSfsFSctl& args, XrdOucErrInfo& error, const XrdSecEntity* client) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::FSctlProto* fsctl_proto = req_proto->mutable_fsctl2(); eos::auth::XrdSfsFSctlProto* args_proto = fsctl_proto->mutable_args(); eos::auth::XrdOucErrInfoProto* xoei_proto = fsctl_proto->mutable_error(); eos::auth::XrdSecEntityProto* xse_proto = fsctl_proto->mutable_client(); fsctl_proto->set_cmd(cmd); ConvertToProtoBuf(&args, args_proto); ConvertToProtoBuf(&error, xoei_proto); ConvertToProtoBuf(client, xse_proto); req_proto->set_type(RequestProto_OperationType_FSCTL2); return req_proto; } //------------------------------------------------------------------------------ // Create chmod request ProtocolBuffer object //------------------------------------------------------------------------------ RequestProto* utils::GetChmodRequest(const char* path, int mode, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { eos::auth::RequestProto* req_proto = 
    new eos::auth::RequestProto();
  eos::auth::ChmodProto* chmod_proto = req_proto->mutable_chmod();
  eos::auth::XrdOucErrInfoProto* xoei_proto = chmod_proto->mutable_error();
  eos::auth::XrdSecEntityProto* xse_proto = chmod_proto->mutable_client();
  chmod_proto->set_path(path);
  chmod_proto->set_mode(mode);
  ConvertToProtoBuf(&error, xoei_proto);
  ConvertToProtoBuf(client, xse_proto);

  // Opaque info is optional
  if (opaque) {
    chmod_proto->set_opaque(opaque);
  }

  req_proto->set_type(RequestProto_OperationType_CHMOD);
  return req_proto;
}

//------------------------------------------------------------------------------
// Create chksum request ProtocolBuffer object
//------------------------------------------------------------------------------
RequestProto*
utils::GetChksumRequest(XrdSfsFileSystem::csFunc func, const char* csname,
                        const char* inpath, XrdOucErrInfo& error,
                        const XrdSecEntity* client,
                        const char* opaque)
{
  eos::auth::RequestProto* req_proto = new eos::auth::RequestProto();
  eos::auth::ChksumProto* chksum_proto = req_proto->mutable_chksum();
  eos::auth::XrdOucErrInfoProto* xoei_proto = chksum_proto->mutable_error();
  chksum_proto->set_func(func);
  chksum_proto->set_csname(csname);

  // Path may be null, serialize an empty string in that case
  if (inpath) {
    chksum_proto->set_path(inpath);
  } else {
    chksum_proto->set_path("");
  }

  ConvertToProtoBuf(&error, xoei_proto);

  // NOTE: unlike the other request builders here, the client entity can be
  // null for a chksum request, therefore it is only serialized when present
  if (client) {
    eos::auth::XrdSecEntityProto* xse_proto = chksum_proto->mutable_client();
    ConvertToProtoBuf(client, xse_proto);
  }

  if (opaque) {
    chksum_proto->set_opaque(opaque);
  }

  req_proto->set_type(RequestProto_OperationType_CHKSUM);
  return req_proto;
}

//------------------------------------------------------------------------------
// Create exists request ProtocolBuffer object
//------------------------------------------------------------------------------
RequestProto*
utils::GetExistsRequest(const char* path, XrdOucErrInfo& error,
                        const XrdSecEntity* client,
                        const char* opaque)
{
  eos::auth::RequestProto* req_proto = new eos::auth::RequestProto();
  eos::auth::ExistsProto* exists_proto =
req_proto->mutable_exists(); eos::auth::XrdOucErrInfoProto* xoei_proto = exists_proto->mutable_error(); eos::auth::XrdSecEntityProto* xse_proto = exists_proto->mutable_client(); exists_proto->set_path(path); ConvertToProtoBuf(&error, xoei_proto); ConvertToProtoBuf(client, xse_proto); if (opaque) { exists_proto->set_opaque(opaque); } req_proto->set_type(RequestProto_OperationType_EXISTS); return req_proto; } //------------------------------------------------------------------------------ // Create mkdir request ProtocolBuffer object //------------------------------------------------------------------------------ RequestProto* utils::GetMkdirRequest(const char* path, int mode, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::MkdirProto* mkdir_proto = req_proto->mutable_mkdir(); eos::auth::XrdOucErrInfoProto* xoei_proto = mkdir_proto->mutable_error(); eos::auth::XrdSecEntityProto* xse_proto = mkdir_proto->mutable_client(); mkdir_proto->set_path(path); mkdir_proto->set_mode(mode); ConvertToProtoBuf(&error, xoei_proto); ConvertToProtoBuf(client, xse_proto); if (opaque) { mkdir_proto->set_opaque(opaque); } req_proto->set_type(RequestProto_OperationType_MKDIR); return req_proto; } //------------------------------------------------------------------------------ // Create remdir request ProtocolBuffer object //------------------------------------------------------------------------------ RequestProto* utils::GetRemdirRequest(const char* path, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::RemdirProto* remdir_proto = req_proto->mutable_remdir(); eos::auth::XrdOucErrInfoProto* xoei_proto = remdir_proto->mutable_error(); eos::auth::XrdSecEntityProto* xse_proto = remdir_proto->mutable_client(); remdir_proto->set_path(path); ConvertToProtoBuf(&error, xoei_proto); 
ConvertToProtoBuf(client, xse_proto); if (opaque) { remdir_proto->set_opaque(opaque); } req_proto->set_type(RequestProto_OperationType_REMDIR); return req_proto; } //------------------------------------------------------------------------------ // Create rem request ProtocolBuffer object //------------------------------------------------------------------------------ RequestProto* utils::GetRemRequest(const char* path, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::RemProto* rem_proto = req_proto->mutable_rem(); eos::auth::XrdOucErrInfoProto* xoei_proto = rem_proto->mutable_error(); eos::auth::XrdSecEntityProto* xse_proto = rem_proto->mutable_client(); rem_proto->set_path(path); ConvertToProtoBuf(&error, xoei_proto); ConvertToProtoBuf(client, xse_proto); if (opaque) { rem_proto->set_opaque(opaque); } req_proto->set_type(RequestProto_OperationType_REM); return req_proto; } //------------------------------------------------------------------------------ // Create rename request ProtocolBuffer object //------------------------------------------------------------------------------ RequestProto* utils::GetRenameRequest(const char* oldName, const char* newName, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaqueO, const char* opaqueN) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::RenameProto* rename_proto = req_proto->mutable_rename(); eos::auth::XrdOucErrInfoProto* xoei_proto = rename_proto->mutable_error(); eos::auth::XrdSecEntityProto* xse_proto = rename_proto->mutable_client(); rename_proto->set_oldname(oldName); rename_proto->set_newname(newName); ConvertToProtoBuf(&error, xoei_proto); ConvertToProtoBuf(client, xse_proto); if (opaqueO) { rename_proto->set_opaqueo(opaqueO); } if (opaqueN) { rename_proto->set_opaqueo(opaqueN); } req_proto->set_type(RequestProto_OperationType_RENAME); return req_proto; } 
//-------------------------------------------------------------------------- // Create prepare request ProtocolBuffer object //-------------------------------------------------------------------------- RequestProto* utils::GetPrepareRequest(XrdSfsPrep& pargs, XrdOucErrInfo& error, const XrdSecEntity* client) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::PrepareProto* prepare_proto = req_proto->mutable_prepare(); eos::auth::XrdSfsPrepProto* xsp_proto = prepare_proto->mutable_pargs(); eos::auth::XrdOucErrInfoProto* xoei_proto = prepare_proto->mutable_error(); eos::auth::XrdSecEntityProto* xse_proto = prepare_proto->mutable_client(); ConvertToProtoBuf(&pargs, xsp_proto); ConvertToProtoBuf(&error, xoei_proto); ConvertToProtoBuf(client, xse_proto); req_proto->set_type(RequestProto_OperationType_PREPARE); return req_proto; } //-------------------------------------------------------------------------- //! Create truncate request ProtocolBuffer object //-------------------------------------------------------------------------- RequestProto* utils::GetTruncateRequest(const char* path, XrdSfsFileOffset fileOffset, XrdOucErrInfo& error, const XrdSecEntity* client, const char* opaque) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::TruncateProto* truncate_proto = req_proto->mutable_truncate(); eos::auth::XrdOucErrInfoProto* xoei_proto = truncate_proto->mutable_error(); eos::auth::XrdSecEntityProto* xse_proto = truncate_proto->mutable_client(); truncate_proto->set_path(path); truncate_proto->set_fileoffset(fileOffset); ConvertToProtoBuf(&error, xoei_proto); ConvertToProtoBuf(client, xse_proto); if (opaque) { truncate_proto->set_opaque(opaque); } req_proto->set_type(RequestProto_OperationType_TRUNCATE); return req_proto; } //-------------------------------------------------------------------------- // Create directory open request ProtocolBuffer object 
//-------------------------------------------------------------------------- RequestProto* utils::GetDirOpenRequest(std::string&& uuid, const char* name, const XrdSecEntity* client, const char* opaque, const char* user, int monid) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::DirOpenProto* dopen_proto = req_proto->mutable_diropen(); eos::auth::XrdSecEntityProto* xse_proto = dopen_proto->mutable_client(); // Save the address of the directory object dopen_proto->set_uuid(uuid); dopen_proto->set_name(name); ConvertToProtoBuf(client, xse_proto); if (opaque) { dopen_proto->set_opaque(opaque); } dopen_proto->set_user(user); dopen_proto->set_monid(monid); req_proto->set_type(RequestProto_OperationType_DIROPEN); return req_proto; } //------------------------------------------------------------------------------ // Create directory next entry request ProtocolBuffer object //------------------------------------------------------------------------------ RequestProto* utils::GetDirReadRequest(std::string&& uuid) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::DirReadProto* dread_proto = req_proto->mutable_dirread(); dread_proto->set_uuid(uuid); req_proto->set_type(RequestProto_OperationType_DIRREAD); return req_proto; } //------------------------------------------------------------------------------ // Create directory FName request ProtocolBuffer object //------------------------------------------------------------------------------ RequestProto* utils::GetDirFnameRequest(std::string&& uuid) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::DirFnameProto* dfname_proto = req_proto->mutable_dirfname(); dfname_proto->set_uuid(uuid); req_proto->set_type(RequestProto_OperationType_DIRFNAME); return req_proto; } //------------------------------------------------------------------------------ // Create directory close request ProtocolBuffer object 
//------------------------------------------------------------------------------ RequestProto* utils::GetDirCloseRequest(std::string&& uuid) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::DirCloseProto* dclose_proto = req_proto->mutable_dirclose(); dclose_proto->set_uuid(uuid); req_proto->set_type(RequestProto_OperationType_DIRCLOSE); return req_proto; } //------------------------------------------------------------------------------ // Create file open request ProtocolBuffer object //------------------------------------------------------------------------------ RequestProto* utils::GetFileOpenRequest(std::string&& uuid, const char* fileName, int openMode, mode_t createMode, const XrdSecEntity* client, const char* opaque, const char* user, int monid) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::FileOpenProto* fopen_proto = req_proto->mutable_fileopen(); eos::auth::XrdSecEntityProto* xse_proto = fopen_proto->mutable_client(); // Save the address of the file object fopen_proto->set_uuid(uuid); fopen_proto->set_name(fileName); fopen_proto->set_openmode(openMode); fopen_proto->set_createmode(createMode); ConvertToProtoBuf(client, xse_proto); if (opaque) { fopen_proto->set_opaque(opaque); } fopen_proto->set_user(user); fopen_proto->set_monid(monid); req_proto->set_type(RequestProto_OperationType_FILEOPEN); return req_proto; } //------------------------------------------------------------------------------ // Create file FName request ProtocolBuffer object //------------------------------------------------------------------------------ RequestProto* utils::GetFileFnameRequest(std::string&& uuid) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::FileFnameProto* ffname_proto = req_proto->mutable_filefname(); ffname_proto->set_uuid(uuid); req_proto->set_type(RequestProto_OperationType_FILEFNAME); return req_proto; } 
//------------------------------------------------------------------------------ // Create file stat request ProtocolBuffer object //----------------------------------------------------------------------------- RequestProto* utils::GetFileStatRequest(std::string&& uuid) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::FileStatProto* fstat_proto = req_proto->mutable_filestat(); fstat_proto->set_uuid(uuid); req_proto->set_type(RequestProto_OperationType_FILESTAT); return req_proto; } //------------------------------------------------------------------------------ // Create file read request ProtocolBuffer object //------------------------------------------------------------------------------ RequestProto* utils::GetFileReadRequest(std::string&& uuid, long long offset, int length) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::FileReadProto* fread_proto = req_proto->mutable_fileread(); fread_proto->set_uuid(uuid); fread_proto->set_offset(offset); fread_proto->set_length(length); req_proto->set_type(RequestProto_OperationType_FILEREAD); return req_proto; } //------------------------------------------------------------------------------ // Create file write request ProtocolBuffer object //----------------------------------------------------------------------------- RequestProto* utils::GetFileWriteRequest(std::string&& uuid, long long offset, const char* buff, int length) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::FileWriteProto* fwrite_proto = req_proto->mutable_filewrite(); fwrite_proto->set_uuid(uuid); fwrite_proto->set_offset(offset); fwrite_proto->set_buff(buff); fwrite_proto->set_length(length); req_proto->set_type(RequestProto_OperationType_FILEWRITE); return req_proto; } //------------------------------------------------------------------------------ // Create file close request ProtocolBuffer object 
//----------------------------------------------------------------------------- RequestProto* utils::GetFileCloseRequest(std::string&& uuid) { eos::auth::RequestProto* req_proto = new eos::auth::RequestProto(); eos::auth::FileCloseProto* fclose_proto = req_proto->mutable_fileclose(); fclose_proto->set_uuid(uuid); req_proto->set_type(RequestProto_OperationType_FILECLOSE); return req_proto; } EOSAUTHNAMESPACE_END ================================================ FILE: auth_plugin/ProtoUtils.hh ================================================ //------------------------------------------------------------------------------ // File: ProtoUtils.hh // Author: Elvin-Alin Sindrilaru CERN //------------------------------------------------------------------------------ /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2013 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ #ifndef __EOS_AUTH_PROTOUTILS_HH__ #define __EOS_AUTH_PROTOUTILS_HH__ #include #include "auth_plugin/Namespace.hh" #include "auth_plugin/Request.pb.h" #include "auth_plugin/Response.pb.h" #include "auth_plugin/XrdSecEntity.pb.h" #include "auth_plugin/XrdSfsPrep.pb.h" #include "auth_plugin/XrdSfsFSctl.pb.h" #include //! 
//! Forward declarations
class XrdSecEntity;
class XrdOucErrInfo;
struct XrdSfsFSctl;

EOSAUTHNAMESPACE_BEGIN

//------------------------------------------------------------------------------
//! ProtoUtils class which contains helper functions for marshalling and
//! unmarshalling object to ProtocolBuffer representation
//------------------------------------------------------------------------------
namespace utils
{
//----------------------------------------------------------------------------
//! Convert XrdSecEntity object to ProtocolBuffers representation
//!
//! @param obj initial object to convert
//! @param proto ProtocolBuffer representation
//----------------------------------------------------------------------------
void ConvertToProtoBuf(const XrdSecEntity* obj,
                       eos::auth::XrdSecEntityProto*& proto);

//----------------------------------------------------------------------------
//! Convert XrdOucErrInfo object to ProtocolBuffers representation
//!
//! @param obj initial object to convert
//! @param proto ProtocolBuffer representation
//----------------------------------------------------------------------------
void ConvertToProtoBuf(XrdOucErrInfo* obj,
                       eos::auth::XrdOucErrInfoProto*& proto);

//----------------------------------------------------------------------------
//! Convert XrdSfsFSctl object to ProtocolBuffers representation
//!
//! @param obj initial object to convert
//! @param proto ProtocolBuffer representation
//----------------------------------------------------------------------------
void ConvertToProtoBuf(const XrdSfsFSctl* obj,
                       eos::auth::XrdSfsFSctlProto*& proto);

//----------------------------------------------------------------------------
//! Convert XrdSfsPrep object to ProtocolBuffers representation
//!
//! @param obj initial object to convert
//! @param proto ProtocolBuffer representation
//----------------------------------------------------------------------------
void ConvertToProtoBuf(const XrdSfsPrep* obj,
                       eos::auth::XrdSfsPrepProto*& proto);

//----------------------------------------------------------------------------
//! Get XrdSecEntity object from protocol buffer object
//!
//! @param proto_obj protocol buffer object
//!
//! @return converted XrdSecEntity object (heap-allocated; release with
//!         DeleteXrdSecEntity)
//----------------------------------------------------------------------------
XrdSecEntity* GetXrdSecEntity(const eos::auth::XrdSecEntityProto& proto_obj);

//----------------------------------------------------------------------------
//! Delete XrdSecEntity object
//!
//! @param obj object to be deleted
//----------------------------------------------------------------------------
void DeleteXrdSecEntity(XrdSecEntity*& obj);

//----------------------------------------------------------------------------
//! Delete XrdSfsPrep object
//!
//! @param obj object to be deleted
//----------------------------------------------------------------------------
void DeleteXrdSfsPrep(XrdSfsPrep*& obj);

//----------------------------------------------------------------------------
//! Get XrdOucErrInfo object from protocol buffer object
//!
//! @param proto_obj protocol buffer object
//!
//! @return converted XrdOucErrInfo object
//----------------------------------------------------------------------------
XrdOucErrInfo* GetXrdOucErrInfo(const eos::auth::XrdOucErrInfoProto& proto_obj);

//----------------------------------------------------------------------------
//! Get XrdSfsPrep object from protocol buffer object
//!
//! @param proto_obj protocol buffer object
//!
//! @return converted XrdSfsPrep object (release with DeleteXrdSfsPrep)
//----------------------------------------------------------------------------
XrdSfsPrep* GetXrdSfsPrep(const eos::auth::XrdSfsPrepProto& proto_obj);

//----------------------------------------------------------------------------
//! Get XrdSfsFSctl object from protocol buffer object
//!
//! @param proto_obj protocol buffer object
//!
//! @return converted XrdSfsFSctl object (release with DeleteXrdSfsFSctl)
//----------------------------------------------------------------------------
XrdSfsFSctl* GetXrdSfsFSctl(const eos::auth::XrdSfsFSctlProto& proto_obj);

//----------------------------------------------------------------------------
//! Delete XrdSfsFSctl object
//!
//! @param obj object to be deleted
//----------------------------------------------------------------------------
void DeleteXrdSfsFSctl(XrdSfsFSctl*& obj);

//----------------------------------------------------------------------------
//! Compute HMAC value of the RequestProto object and append it to the
//! object using the required field hmac.
//!
//! @param req RequestProto object
//!
//! @return true if computation successful and attribute updated, otherwise
//!         false
//----------------------------------------------------------------------------
bool ComputeHMAC(eos::auth::RequestProto*& req);

//----------------------------------------------------------------------------
//! Create stat request ProtocolBuffer object
//!
//! @param type request operation type
//! @param path file path
//! @param error error information obj
//! @param client client security information obj
//! @param opaque opaque information
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetStatRequest(eos::auth::RequestProto_OperationType type,
               const char* path,
               XrdOucErrInfo& error,
               const XrdSecEntity* client,
               const char* opaque = 0);

//----------------------------------------------------------------------------
//! Create fsctl request ProtocolBuffer object
//!
//! @param cmd command type
//! @param args command arguments
//! @param error error information obj
//! @param client client security information
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetFsctlRequest(const int cmd, const char* args,
                XrdOucErrInfo& error,
                const XrdSecEntity* client);

//----------------------------------------------------------------------------
//! Create FSctl request ProtocolBuffer object
//!
//! @param cmd command type
//! @param args command arguments structure
//! @param error error information obj
//! @param client client security information
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetFSctlRequest(const int cmd, XrdSfsFSctl& args,
                XrdOucErrInfo& error,
                const XrdSecEntity* client);

//----------------------------------------------------------------------------
//! Create chmod request ProtocolBuffer object
//!
//! @param path directory path
//! @param mode mode
//! @param error error information object
//! @param client client security information object
//! @param opaque opaque information
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetChmodRequest(const char* path, int mode,
                XrdOucErrInfo& error,
                const XrdSecEntity* client,
                const char* opaque = 0);

//----------------------------------------------------------------------------
//! Create chksum request ProtocolBuffer object
//!
//! @param func checksum function
//! @param csname checksum name
//! @param inpath input path
//! @param error error information object
//! @param client client security information object
//! @param opaque opaque information
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetChksumRequest(XrdSfsFileSystem::csFunc func,
                 const char* csname,
                 const char* inpath,
                 XrdOucErrInfo& error,
                 const XrdSecEntity* client = 0,
                 const char* opaque = 0);

//----------------------------------------------------------------------------
//! Create exists request ProtocolBuffer object
//!
//! @param path file/directory path
//! @param error error information object
//! @param client client security information object
//! @param opaque opaque information
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetExistsRequest(const char* path,
                 XrdOucErrInfo& error,
                 const XrdSecEntity* client,
                 const char* opaque = 0);

//----------------------------------------------------------------------------
//! Create mkdir request ProtocolBuffer object
//!
//! @param path directory path
//! @param mode mode
//! @param error error information object
//! @param client client security information object
//! @param opaque opaque information
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetMkdirRequest(const char* path, int mode,
                XrdOucErrInfo& error,
                const XrdSecEntity* client,
                const char* opaque = 0);

//----------------------------------------------------------------------------
//! Create remdir request ProtocolBuffer object
//!
//! @param path directory path
//! @param error error information object
//! @param client client security information object
//! @param opaque opaque information
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetRemdirRequest(const char* path,
                 XrdOucErrInfo& error,
                 const XrdSecEntity* client,
                 const char* opaque = 0);

//----------------------------------------------------------------------------
//! Create rem request ProtocolBuffer object
//!
//! @param path file path
//! @param error error information object
//! @param client client security information object
//! @param opaque opaque information
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetRemRequest(const char* path,
              XrdOucErrInfo& error,
              const XrdSecEntity* client,
              const char* opaque = 0);

//----------------------------------------------------------------------------
//! Create rename request ProtocolBuffer object
//!
//! @param oldName old name
//! @param newName new name
//! @param error error information object
//! @param client client security information object
//! @param opaqueO opaque information for old name
//! @param opaqueN opaque information for new name
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetRenameRequest(const char* oldName,
                 const char* newName,
                 XrdOucErrInfo& error,
                 const XrdSecEntity* client,
                 const char* opaqueO,
                 const char* opaqueN);

//----------------------------------------------------------------------------
//! Create prepare request ProtocolBuffer object
//!
//! @param pargs prepare operation arguments
//! @param error error information object
//! @param client client security information object
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetPrepareRequest(XrdSfsPrep& pargs,
                  XrdOucErrInfo& error,
                  const XrdSecEntity* client);

//----------------------------------------------------------------------------
//! Create truncate request ProtocolBuffer object
//!
//! @param path file to be truncated
//! @param fileOffset truncate offset value
//! @param error error information object
//! @param client client security information object
//! @param opaque opaque information
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetTruncateRequest(const char* path,
                   XrdSfsFileOffset fileOffset,
                   XrdOucErrInfo& error,
                   const XrdSecEntity* client,
                   const char* opaque);

//--------------------------------------------------------------------------
//! Create directory open request ProtocolBuffer object
//!
//! @param uuid unique identifier for the current directory
//! @param name name of the directory
//! @param client client security information object
//! @param opaque opaque information
//! @param user user name passed initially to the constructor
//! @param monid MonID value passed initially to the constructor
//!
//! @return request ProtoBuffer object
//--------------------------------------------------------------------------
eos::auth::RequestProto*
GetDirOpenRequest(std::string&& uuid,
                  const char* name,
                  const XrdSecEntity* client,
                  const char* opaque = 0,
                  const char* user = 0,
                  int monid = 0);

//--------------------------------------------------------------------------
//! Create directory next entry request ProtocolBuffer object
//!
//! @param uuid unique identifier for the current directory
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto* GetDirReadRequest(std::string&& uuid);

//----------------------------------------------------------------------------
//! Create directory FName request ProtocolBuffer object
//!
//! @param uuid unique identifier for the current directory
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto* GetDirFnameRequest(std::string&& uuid);

//----------------------------------------------------------------------------
//! Create directory close request ProtocolBuffer object
//!
//! @param uuid unique identifier for the current directory
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto* GetDirCloseRequest(std::string&& uuid);

//----------------------------------------------------------------------------
//! Create file open request ProtocolBuffer object
//!
//! @param uuid unique identifier for the current file
//! @param fileName name of the file
//! @param openMode open mode flags
//! @param createMode create mode flag
//! @param client client security information object
//! @param opaque opaque information
//! @param user user name passed initially to the constructor
//! @param monid MonID value passed initially to the constructor
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetFileOpenRequest(std::string&& uuid,
                   const char* fileName,
                   int openMode,
                   mode_t createMode,
                   const XrdSecEntity* client,
                   const char* opaque = 0,
                   const char* user = 0,
                   int monid = 0);

//----------------------------------------------------------------------------
//! Create file FName request ProtocolBuffer object
//!
//! @param uuid unique identifier for the current file
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto* GetFileFnameRequest(std::string&& uuid);

//----------------------------------------------------------------------------
//! Create file stat request ProtocolBuffer object
//!
//! @param uuid unique identifier for the current file
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto* GetFileStatRequest(std::string&& uuid);

//----------------------------------------------------------------------------
//! Create file read request ProtocolBuffer object
//!
//! @param uuid unique identifier for the current file
//! @param offset offset in file
//! @param length length of read
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetFileReadRequest(std::string&& uuid, long long offset, int length);

//----------------------------------------------------------------------------
//! Create file write request ProtocolBuffer object
//!
//! @param uuid unique identifier for the current file
//! @param offset offset in file
//! @param buff data buffer to be written
//! @param length length of the data buffer
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto*
GetFileWriteRequest(std::string&& uuid, long long offset,
                    const char* buff, int length);

//----------------------------------------------------------------------------
//! Create file close request ProtocolBuffer object
//!
//! @param uuid unique identifier for the current file
//!
//!
//! @return request ProtoBuffer object
//----------------------------------------------------------------------------
eos::auth::RequestProto* GetFileCloseRequest(std::string&& uuid);
}

EOSAUTHNAMESPACE_END

#endif //__EOS_AUTH_PROTOUTILS_HH__

================================================
FILE: auth_plugin/proto/Chksum.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdOucErrInfo.proto";
import "XrdSecEntity.proto";

//------------------------------------------------------------------------------
// Chksum request protocol buffer message
//------------------------------------------------------------------------------
message ChksumProto
{
  required int64 func = 1;
  required string csname = 2;
  required string path = 3;
  required XrdOucErrInfoProto error = 4;
  optional XrdSecEntityProto client = 5;
  optional string opaque = 6 [default = ""];
}

================================================
FILE: auth_plugin/proto/Chmod.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdOucErrInfo.proto";
import "XrdSecEntity.proto";

//------------------------------------------------------------------------------
// Chmod request protocol buffer message
//------------------------------------------------------------------------------
message ChmodProto
{
  required string path = 1;
  required int64 mode = 2;
  required XrdOucErrInfoProto error = 3;
  required XrdSecEntityProto client = 4;
  optional string opaque = 5 [default = ""];
}

================================================
FILE: auth_plugin/proto/DirClose.proto
================================================
syntax = "proto2";
package eos.auth;

//------------------------------------------------------------------------------
// DirClose request protocol buffer message
//------------------------------------------------------------------------------
message DirCloseProto
{
  required string uuid = 1; ///< this is the pointer to the local
///< directory object
}

================================================
FILE: auth_plugin/proto/DirFname.proto
================================================
syntax = "proto2";
package eos.auth;

//------------------------------------------------------------------------------
// DirFname request protocol buffer message
//------------------------------------------------------------------------------
message DirFnameProto
{
  required string uuid = 1; ///< this is the pointer to the local directory object
}

================================================
FILE: auth_plugin/proto/DirOpen.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdSecEntity.proto";

//------------------------------------------------------------------------------
// DirOpen request protocol buffer message
//------------------------------------------------------------------------------
message DirOpenProto
{
  required string uuid = 1; ///< this is the pointer to the local directory object
  required string name = 2;
  optional XrdSecEntityProto client = 3;
  optional string opaque = 4 [default = ""];
  optional string user = 5 [default = ""]; ///< this and the following are the values passed
  optional int64 monid = 6 [default = 0]; ///< to the constructor of the directory object
}

================================================
FILE: auth_plugin/proto/DirRead.proto
================================================
syntax = "proto2";
package eos.auth;

//------------------------------------------------------------------------------
// DirRead request protocol buffer message
//------------------------------------------------------------------------------
message DirReadProto
{
  required string uuid = 1; ///< this is the pointer to the local directory object
}

================================================
FILE: auth_plugin/proto/Exists.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdOucErrInfo.proto";
import "XrdSecEntity.proto";

//------------------------------------------------------------------------------
// Exists request protocol buffer message
//------------------------------------------------------------------------------
message ExistsProto
{
  required string path = 1;
  // NOTE(review): field number 2 is unused; keep the gap for wire compatibility
  required XrdOucErrInfoProto error = 3;
  required XrdSecEntityProto client = 4;
  optional string opaque = 5;
}

================================================
FILE: auth_plugin/proto/FS_ctl.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdOucErrInfo.proto";
import "XrdSecEntity.proto";
import "XrdSfsFSctl.proto";

//------------------------------------------------------------------------------
// FSctl request protocol buffer message used for FSclt(..) function calls
//------------------------------------------------------------------------------
message FSctlProto
{
  required int64 cmd = 1;
  required XrdSfsFSctlProto args = 2;
  required XrdOucErrInfoProto error = 3;
  required XrdSecEntityProto client = 4;
}

================================================
FILE: auth_plugin/proto/FileClose.proto
================================================
syntax = "proto2";
package eos.auth;

//------------------------------------------------------------------------------
// FileClose request protocol buffer message
//------------------------------------------------------------------------------
message FileCloseProto
{
  required string uuid = 1; ///< this is the pointer to the local file object
}

================================================
FILE: auth_plugin/proto/FileFname.proto
================================================
syntax = "proto2";
package eos.auth;

//------------------------------------------------------------------------------
// FileFname request protocol buffer message
//------------------------------------------------------------------------------
message FileFnameProto
{
  required string uuid = 1; ///< this is the pointer to the local file object
}

================================================
FILE: auth_plugin/proto/FileOpen.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdSecEntity.proto";

//------------------------------------------------------------------------------
// FileOpen request protocol buffer message
//------------------------------------------------------------------------------
message FileOpenProto
{
  required string uuid = 1; ///< this is the pointer to the local file object
  required string name = 2;
  required int64 openmode = 3;
  required int64 createmode = 4;
  optional XrdSecEntityProto client = 5;
  optional string opaque = 6 [default = ""];
  optional string user = 7 [default = ""]; ///< this and the following are the values passed
  optional int64 monid = 8 [default = 0]; ///< to the constructor of the file object
}

================================================
FILE: auth_plugin/proto/FileRead.proto
================================================
syntax = "proto2";
package eos.auth;

//------------------------------------------------------------------------------
// FileRead request protocol buffer message
//------------------------------------------------------------------------------
message FileReadProto
{
  required string uuid = 1; ///< this is the pointer to the local file object
  required int64 offset = 2;
  // NOTE(review): field number 3 is unused; keep the gap for wire compatibility
  required int64 length = 4;
}

================================================
FILE: auth_plugin/proto/FileStat.proto
================================================
syntax = "proto2";
package eos.auth;

//------------------------------------------------------------------------------
// FileStat request protocol buffer message
//------------------------------------------------------------------------------
message FileStatProto
{
  required string uuid = 1; ///< this is the pointer to the local file object
}

================================================
FILE: auth_plugin/proto/FileWrite.proto
================================================
syntax = "proto2";
package eos.auth;

//------------------------------------------------------------------------------
// FileWrite request protocol buffer message
//------------------------------------------------------------------------------
message FileWriteProto
{
  required string uuid = 1; ///< this is the pointer to the local file object
  required int64 offset = 2;
  required bytes buff = 3;
  required int64 length = 4;
}

================================================
FILE: auth_plugin/proto/Fsctl.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdOucErrInfo.proto";
import "XrdSecEntity.proto";

//------------------------------------------------------------------------------
// Fsctl request protocol buffer message used for Fsctl(..) function calls
//------------------------------------------------------------------------------
message FsctlProto
{
  required int64 cmd = 1;
  required string args = 2;
  required XrdOucErrInfoProto error = 3;
  required XrdSecEntityProto client = 4;
}

================================================
FILE: auth_plugin/proto/GetStats.proto
================================================
syntax = "proto2";
package eos.auth;

//------------------------------------------------------------------------------
// getStats request protocol buffer message
//------------------------------------------------------------------------------
message GetStatsProto
{
  // empty
}

================================================
FILE: auth_plugin/proto/Mkdir.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdOucErrInfo.proto";
import "XrdSecEntity.proto";

//------------------------------------------------------------------------------
// Mkdir request protocol buffer message
//------------------------------------------------------------------------------
message MkdirProto
{
  required string path = 1;
  required int64 mode = 2;
  required XrdOucErrInfoProto error = 3;
  required XrdSecEntityProto client = 4;
  optional string opaque = 5 [default = ""];
}

================================================
FILE: auth_plugin/proto/Prepare.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdOucErrInfo.proto";
import "XrdSecEntity.proto";
import "XrdSfsPrep.proto";

//------------------------------------------------------------------------------
// Prepare request protocol buffer message
//------------------------------------------------------------------------------
message PrepareProto
{
  required XrdSfsPrepProto pargs = 1;
  required XrdOucErrInfoProto error = 2;
  required XrdSecEntityProto client = 3;
}

================================================
FILE: auth_plugin/proto/Rem.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdOucErrInfo.proto";
import "XrdSecEntity.proto";

//------------------------------------------------------------------------------
// Rem request protocol buffer message
//------------------------------------------------------------------------------
message RemProto
{
  required string path = 1;
  required XrdOucErrInfoProto error = 2;
  required XrdSecEntityProto client = 3;
  optional string opaque = 4 [default = ""];
}

================================================
FILE: auth_plugin/proto/Remdir.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdOucErrInfo.proto";
import "XrdSecEntity.proto";

//------------------------------------------------------------------------------
// Remdir request protocol buffer message
//------------------------------------------------------------------------------
message RemdirProto
{
  required string path = 1;
  required XrdOucErrInfoProto error = 2;
  required XrdSecEntityProto client = 3;
  optional string opaque = 4 [default = ""];
}
================================================
FILE: auth_plugin/proto/Rename.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdOucErrInfo.proto";
import "XrdSecEntity.proto";

//------------------------------------------------------------------------------
// Rename request protocol buffer message
//------------------------------------------------------------------------------
message RenameProto
{
  required string oldname = 1;
  required string newname = 2;
  required XrdOucErrInfoProto error = 3;
  required XrdSecEntityProto client = 4;
  optional string opaqueo = 5 [default = ""];
  optional string opaquen = 6 [default = ""];
}

================================================
FILE: auth_plugin/proto/Request.proto
================================================
syntax = "proto2";
package eos.auth;
import "Stat.proto";
import "Fsctl.proto";
import "FS_ctl.proto";
import "Chmod.proto";
import "Chksum.proto";
import "Exists.proto";
import "Mkdir.proto";
import "Remdir.proto";
import "Rem.proto";
import "Rename.proto";
import "Prepare.proto";
import "Truncate.proto";
import "DirOpen.proto";
import "DirRead.proto";
import "DirFname.proto";
import "DirClose.proto";
import "FileOpen.proto";
import "FileFname.proto";
import "FileStat.proto";
import "FileRead.proto";
import "FileWrite.proto";
import "FileClose.proto";

//------------------------------------------------------------------------------
// Request message sent to the server
//------------------------------------------------------------------------------
message RequestProto
{
  enum OperationType
  {
    STAT = 0;      // stat to get struct stat
    FSCTL1 = 1;    // fsctl
    FSCTL2 = 2;    // FSctl
    CHMOD = 3;
    CHKSUM = 4;
    EXISTS = 5;
    STATM = 6;     // stat mode
    MKDIR = 7;
    REMDIR = 8;
    REM = 9;
    RENAME = 10;
    PREPARE = 11;
    TRUNCATE = 12;
    DIROPEN = 13;
    DIRFNAME = 14;
    DIRREAD = 15;
    DIRCLOSE = 16;
    FILEOPEN = 17;
    FILEFNAME = 18;
    FILESTAT = 19;
    FILEREAD = 20;
    FILEWRITE = 21;
    FILECLOSE = 22;
  }

  // Identifies which field is filled in
  required OperationType type = 1;

  // Encrypted sha1 of the string representation of the object excluding
  // the field 'hmac' which is to be considered as empty string ("") during
  // the computation and then updated at the end to the new value
  required string hmac = 2;

  // One of the following is filled in
  optional StatProto stat = 3;
  optional FsctlProto fsctl1 = 4;
  optional FSctlProto fsctl2 = 5;
  optional ChmodProto chmod = 6;
  optional ChksumProto chksum = 7;
  optional ExistsProto exists = 8;
  optional StatProto statm = 9;
  optional MkdirProto mkdir = 10;
  optional RemdirProto remdir = 11;
  optional RemProto rem = 12;
  optional RenameProto rename = 13;
  optional PrepareProto prepare = 14;
  optional TruncateProto truncate = 15;
  optional DirOpenProto diropen = 16;
  optional DirReadProto dirread = 17;
  optional DirFnameProto dirfname = 18;
  optional DirCloseProto dirclose = 19;
  optional FileOpenProto fileopen = 20;
  optional FileFnameProto filefname = 21;
  optional FileStatProto filestat = 22;
  optional FileReadProto fileread = 23;
  optional FileWriteProto filewrite = 24;
  optional FileCloseProto fileclose = 25;
}

================================================
FILE: auth_plugin/proto/Response.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdOucErrInfo.proto";

//------------------------------------------------------------------------------
// Response object received from the server for most of the requests
//------------------------------------------------------------------------------
message ResponseProto
{
  required int64 response = 1;
  optional bytes message = 2;
  optional XrdOucErrInfoProto error = 3;
  optional bool collapse = 4;
}

================================================
FILE: auth_plugin/proto/Stat.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdOucErrInfo.proto";
import "XrdSecEntity.proto";

//------------------------------------------------------------------------------
// Stat request protocol buffer message
//------------------------------------------------------------------------------
message StatProto
{
  required string path = 1;
  required XrdOucErrInfoProto error = 2;
  required XrdSecEntityProto client = 3;
  optional string opaque = 4 [default = ""];
}

================================================
FILE: auth_plugin/proto/Truncate.proto
================================================
syntax = "proto2";
package eos.auth;
import "XrdOucErrInfo.proto";
import "XrdSecEntity.proto";

//------------------------------------------------------------------------------
// Truncate request protocol buffer message
//------------------------------------------------------------------------------
message TruncateProto
{
  required string path = 1;
  required int64 fileoffset = 2;
  required XrdOucErrInfoProto error = 3;
  optional XrdSecEntityProto client = 4;
  optional string opaque = 5 [default = ""];
}

================================================
FILE: auth_plugin/proto/XrdOucErrInfo.proto
================================================
syntax = "proto2";
package eos.auth;

//------------------------------------------------------------------------------
// XrdOucErrInfo request protocol buffer message - only required fields
//------------------------------------------------------------------------------
message XrdOucErrInfoProto
{
  required string user = 1;
  optional int64 code = 2;
  optional string message = 3;
}

================================================
FILE: auth_plugin/proto/XrdSecEntity.proto
================================================
syntax = "proto2";
package eos.auth;

//------------------------------------------------------------------------------
// XrdSecEntity protocol buffer representation
//------------------------------------------------------------------------------
message XrdSecEntityProto
{
  required string prot = 1;          // Protocol used
  required string name = 2;          // Entity's name
  required string host = 3;          // Entity's host name
  required string vorg = 4;          // Entity's virtual organization
  required string role = 5;          // Entity's role
  required string grps = 6;          // Entity's group names
  required string endorsements = 7;  // Protocol specific endorsements
  required string creds = 8;         // Raw client credentials or certificate
  required int64 credslen = 9;       // Length of the 'creds' field
  required string moninfo = 10;      // Additional information for monitoring
  required string tident = 11;       // Trace identifier (do not touch)
}

================================================
FILE: auth_plugin/proto/XrdSfsFSctl.proto
================================================
syntax = "proto2";
package eos.auth;

//------------------------------------------------------------------------------
// XrdSfsFSctl structure protocol buffer representation
// For info look into XrdSfs/XrdSfsInterface.hh
//------------------------------------------------------------------------------
message XrdSfsFSctlProto
{
  required string Arg1 = 1;
  required int64 Arg1Len = 2;
  optional int64 Arg2Len = 3 [default = 0];
  optional string Arg2 = 4;
}

================================================
FILE: auth_plugin/proto/XrdSfsPrep.proto
================================================
syntax = "proto2";
package eos.auth;

//------------------------------------------------------------------------------
// XrdSfsPrep request protocol buffer message
//------------------------------------------------------------------------------
message XrdSfsPrepProto
{
  required string reqid = 1;
  required string notify = 2;
  required int64 opts = 3;
  repeated string paths = 4;
  repeated string oinfo = 5;
}

================================================
FILE: client/CMakeLists.txt
================================================
#-------------------------------------------------------------------------------
# File: CMakeLists.txt
# Author: Andreas-Joachim Peters - CERN
#-------------------------------------------------------------------------------
# ************************************************************************
# * EOS - the CERN Disk Storage System                                   *
# * Copyright (C) 2018 CERN/Switzerland                                  *
# *                                                                      *
# * This program is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation, either version 3 of the License, or    *
# * (at your option) any later version.                                  *
# *                                                                      *
# * This program is distributed in the hope that it will be useful,      *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of       *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
# * GNU General Public License for more details.                         *
# *                                                                      *
# * You should have received a copy of the GNU General Public License    *
# * along with this program.  If not, see <http://www.gnu.org/licenses/>.*
# ************************************************************************

include_directories(${CMAKE_BINARY_DIR})

#-------------------------------------------------------------------------------
# Disable -Wsign-compare warnings due to
# grpcpp/support/proto_buffer_reader.h:157:24: warning: comparison of
# integer expressions of different signedness: ‘uint64_t’ {aka ‘long
# unsigned int’} and ‘int’ [-Wsign-compare]
#-------------------------------------------------------------------------------
add_compile_options(-Wno-sign-compare)

#-------------------------------------------------------------------------------
# eos executable
#-------------------------------------------------------------------------------
add_library(EosGrpcClient-Objects OBJECT
  grpc/GrpcClient.cc grpc/GrpcClient.hh)

# NOTE(review): redundant with the directory-wide add_compile_options above;
# kept so the target stays warning-clean if the directory option is dropped.
target_compile_options(EosGrpcClient-Objects PRIVATE -Wno-sign-compare)

target_link_libraries(EosGrpcClient-Objects PUBLIC
  EosGrpcProto-Objects
  XROOTD::UTILS)

target_compile_definitions(EosGrpcClient-Objects PUBLIC
  -DDAEMONUID=${DAEMONUID} -DDAEMONGID=${DAEMONGID} -DHAVE_ATOMICS=1)

set_target_properties(EosGrpcClient-Objects PROPERTIES
  POSITION_INDEPENDENT_CODE TRUE)

add_executable(eos-grpc-ping grpc/Ping.cc)
add_executable(eos-grpc-md grpc/Md.cc)
add_executable(eos-grpc-find grpc/Find.cc)
add_executable(eos-grpc-ns grpc/Ns.cc)
add_executable(eos-grpc-insert grpc/Insert.cc)
add_executable(eos-grpc-ns-stat grpc/NsStat.cc)

#-------------------------------------------------------------------------------
# Add dependency which guarantees that the protocol buffer files are generated
# when we build the executables
#-------------------------------------------------------------------------------
target_link_libraries(eos-grpc-ping PUBLIC
  EosGrpcProto-Objects
  EosGrpcClient-Objects
  EosCommon)

target_link_libraries(eos-grpc-md PUBLIC
  EosGrpcProto-Objects
  EosGrpcClient-Objects
  EosCommon)

target_link_libraries(eos-grpc-find PUBLIC
  EosGrpcProto-Objects
  EosGrpcClient-Objects
  EosCommon)

target_link_libraries(eos-grpc-ns PUBLIC
  EosGrpcProto-Objects
  EosGrpcClient-Objects
  EosConsoleHelpers-Objects
  EosCommon)

target_link_libraries(eos-grpc-insert PUBLIC
  EosGrpcProto-Objects
  EosGrpcClient-Objects
  EosCommon)

target_link_libraries(eos-grpc-ns-stat PUBLIC
  EosGrpcProto-Objects
  EosGrpcClient-Objects
  EosCommon)

install(TARGETS eos-grpc-ping eos-grpc-md eos-grpc-insert eos-grpc-ns
  eos-grpc-find eos-grpc-ns-stat
  LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}
  RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}
  ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR})

================================================ FILE: client/Namespace.hh ================================================
// ----------------------------------------------------------------------
// File: Namespace.hh
// Author: Andreas-Joachim Peters - CERN
// ----------------------------------------------------------------------

/************************************************************************
 * EOS - the CERN Disk Storage System                                   *
 * Copyright (C) 2011 CERN/Switzerland                                  *
 *                                                                      *
 * This program is free
software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ #ifndef __EOSCLIENT_NAMESPACE_HH__ #define __EOSCLIENT_NAMESPACE_HH__ #define USE_EOSCLIENTNAMESPACE using namespace eos::client; #define EOSCLIENTNAMESPACE_BEGIN namespace eos { namespace client { #define EOSCLIENTNAMESPACE_END }} #endif ================================================ FILE: client/grpc/Find.cc ================================================ #include #include #include "client/grpc/GrpcClient.hh" #include #include "common/StringConversion.hh" int usage(const char* prog) { fprintf(stderr, "usage: %s [--key " "--cert " "--ca ] " "[--endpoint ] [--token ] [--export ] [--depth ] [--select ] [--force-ssl] [-f | -d] \n", prog); fprintf(stderr, " is setup as \"key1:val1,key2:val2,key3:val3 ... 
where keyN:valN is one of \n"); fprintf(stderr, "" " owner-root:1|0\n" " group-root:1|0\n" " owner:\n" " group:\n" " regex-filename:\n" " regex-dirname:\n" " zero-size:1|0\n" " min-size:\n" " max-size:\n" " min-children:\n" " max-children:\n" " zero-children:1|0\n" " min-locations:\n" " max-locations:\n" " zero-locations:1|0\n" " min-unlinked_locations:\n" " max-unlinked_locations:\n" " max-treesize:\n" " zero-treesize:1|0\n" " min-ctime:\n" " max-ctime:\n" " zero-ctime:1|0\n" " min-mtime:\n" " max-mtime:\n" " zero-mtime:1|0\n" " min-stime:\n" " max-stime:\n" " zero-stime:1|0\n" " layoutid:\n" " flags:\n" " symlink:1|0\n" " checksum-type:\n" " checksum-value:\n" " xattr:=\n"); return -1; } int main(int argc, const char* argv[]) { std::string endpoint = "localhost:50051"; std::string token = ""; std::string key; std::string cert; std::string ca; std::string keyfile; std::string certfile; std::string cafile; std::string path = ""; std::list select; bool files = false; bool dirs = false; uint64_t depth = 1024; std::string exportfs = ""; bool force_ssl = false; for (auto i = 1; i < argc; ++i) { std::string option = argv[i]; if (option == "--key") { if (argc > i + 1) { keyfile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--cert") { if (argc > i + 1) { certfile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--ca") { if (argc > i + 1) { cafile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--endpoint") { if (argc > i + 1) { endpoint = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--token") { if (argc > i + 1) { token = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--export") { if (argc > i + 1) { exportfs = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--depth") { if (argc > i + 1) { depth = strtoull(argv[i + 1], 0, 10); ++i; continue; } else { return usage(argv[0]); } } 
if (option == "--select") { if (argc > i + 1) { select.push_back(argv[i + 1]); ++i; continue; } else { return usage(argv[0]); } } if (option == "-f") { files = true; continue; } if (option == "-d") { dirs = true; continue; } if (option == "--force-ssl") { force_ssl = true; continue; } path = option; if (argc > (i + 1)) { return usage(argv[0]); } } if (!files && ! dirs) { files = true; dirs = true; } if (keyfile.length() || certfile.length() || cafile.length()) { if (!keyfile.length() || !certfile.length() || !cafile.length()) { return usage(argv[0]); } } if (path.empty()) { return usage(argv[0]); } if (path.front() != '/') { return usage(argv[0]); } std::unique_ptr eosgrpc = eos::client::GrpcClient::Create( endpoint, token, keyfile, certfile, cafile, force_ssl); if (!eosgrpc) { return usage(argv[0]); } std::chrono::steady_clock::time_point watch_global = std::chrono::steady_clock::now(); std::string reply = eosgrpc->Find(path, select, 0, 0, files, dirs, depth, true, exportfs); std::chrono::microseconds elapsed_global = std::chrono::duration_cast (std::chrono::steady_clock::now() - watch_global); std::cout << "request took " << elapsed_global.count() << " micro seconds" << std::endl; return 0; } ================================================ FILE: client/grpc/GrpcClient.cc ================================================ // ---------------------------------------------------------------------- // File: GrpccLIENT.cc // Author: Andreas-Joachim Peters - CERN // ---------------------------------------------------------------------- /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2018 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. 
* * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ #ifdef EOS_GRPC #include "GrpcClient.hh" #include "proto/Rpc.grpc.pb.h" #include "common/StringConversion.hh" #include "common/Timing.hh" #include "common/Path.hh" #include #include #include #include #include EOSCLIENTNAMESPACE_BEGIN using grpc::Channel; using grpc::ClientAsyncResponseReader; using grpc::ClientAsyncReader; using grpc::ClientContext; using grpc::CompletionQueue; using grpc::Status; using eos::rpc::Eos; using eos::rpc::PingRequest; using eos::rpc::PingReply; using eos::rpc::MDRequest; using eos::rpc::MDResponse; using eos::rpc::FindRequest; using eos::rpc::NSRequest; using eos::rpc::NSResponse; using eos::rpc::NsStatRequest; using eos::rpc::NsStatResponse; using eos::rpc::FileInsertRequest; using eos::rpc::ContainerInsertRequest; using eos::rpc::InsertReply; using eos::rpc::ContainerMdProto; using eos::rpc::FileMdProto; std::string GrpcClient::Ping(const std::string& payload) { PingRequest request; request.set_message(payload); request.set_authkey(token()); PingReply reply; ClientContext context; // The producer-consumer queue we use to communicate asynchronously with the // gRPC runtime. CompletionQueue cq; Status status; // stub_->AsyncPing() performs the RPC call, returning an instance we // store in "rpc". Because we are using the asynchronous API, we need to // hold on to the "rpc" instance in order to get updates on the ongoing RPC. 
std::unique_ptr > rpc( stub_->AsyncPing(&context, request, &cq)); // Request that, upon completion of the RPC, "reply" be updated with the // server's response; "status" with the indication of whether the operation // was successful. Tag the request with the integer 1. rpc->Finish(&reply, &status, (void*) 1); void* got_tag; bool ok = false; // Block until the next result is available in the completion queue "cq". // The return value of Next should always be checked. This return value // tells us whether there is any kind of event or the cq_ is shutting down. ABSL_CHECK(cq.Next(&got_tag, &ok)); // Verify that the result from "cq" corresponds, by its tag, our previous // request. ABSL_CHECK(got_tag == (void*) 1); // ... and that the request was completed successfully. Note that "ok" // corresponds solely to the request for updates introduced by Finish(). ABSL_CHECK(ok); // Act upon the status of the actual RPC. if (status.ok()) { return reply.message(); } else { return ""; } } std::string GrpcClient::Md(const std::string& path, uint64_t id, uint64_t ino, bool list, bool printonly) { MDRequest request; if (list) { request.set_type(eos::rpc::LISTING); } else { request.set_type(eos::rpc::STAT); } if (path.length()) { request.mutable_id()->set_path(path); } else if (id) { request.mutable_id()->set_id(id); } else if (ino) { request.mutable_id()->set_ino(ino); } else { return ""; } request.set_authkey(token()); MDResponse response; ClientContext context; std::string responsestring; CompletionQueue cq; Status status; std::unique_ptr > rpc( stub_->AsyncMD(&context, request, &cq, (void*) 1)); void* got_tag; bool ok = false; bool ret = cq.Next(&got_tag, &ok); while (1) { rpc->Read(&response, (void*) 1); ok = false; ret = cq.Next(&got_tag, &ok); if (!ret || !ok || got_tag != (void*) 1) { break; } google::protobuf::util::JsonPrintOptions options; options.add_whitespace = true; #if GOOGLE_PROTOBUF_VERSION >= 5027000 options.always_print_fields_with_no_presence = true; #else 
options.always_print_primitive_fields = true; #endif std::string jsonstring; (void) google::protobuf::util::MessageToJsonString(response, &jsonstring, options); if (printonly) { std::cout << jsonstring << std::endl; } else { responsestring += jsonstring; } } if (!status.ok()) { std::cerr << "error: " << status.error_message() << std::endl; } return responsestring; } std::string GrpcClient::Find(const std::string& path, const std::list& filters, uint64_t id, uint64_t ino, bool files, bool dirs, uint64_t depth, bool printonly, const std::string& exportfs) { FindRequest request; if (files && !dirs) { // query files request.set_type(eos::rpc::FILE); } else if (dirs && !files) { // query container request.set_type(eos::rpc::CONTAINER); } else { // query files & container request.set_type(eos::rpc::LISTING); } if (path.length()) { request.mutable_id()->set_path(path); } else if (id) { request.mutable_id()->set_id(id); } else if (ino) { request.mutable_id()->set_ino(ino); } else { return ""; } if (depth) { request.set_maxdepth(depth); } request.set_authkey(token()); for (const auto& filter : filters) { // enable filtering request.mutable_selection()->set_select(true); std::map filtermap; eos::common::StringConversion::GetKeyValueMap(filter.c_str(), filtermap); for (auto const& x : filtermap) { if (x.first == "owner-root") { request.mutable_selection()->set_owner_root(strtoul(x.second.c_str(), 0, 10) ? true : false); } else if (x.first == "group-root") { request.mutable_selection()->set_group_root(strtoul(x.second.c_str(), 0, 10) ? 
true : false); } else if (x.first == "owner") { request.mutable_selection()->set_owner(strtoul(x.second.c_str(), 0, 10)); } else if (x.first == "group") { request.mutable_selection()->set_group(strtoul(x.second.c_str(), 0, 10)); } else if (x.first == "regex-filename") { request.mutable_selection()->set_regexp_filename(x.second); } else if (x.first == "regex-dirname") { request.mutable_selection()->set_regexp_dirname(x.second); } else if (x.first == "zero-size") { request.mutable_selection()->mutable_size()->set_zero(strtoul(x.second.c_str(), 0, 10) ? true : false); } else if (x.first == "min-size") { request.mutable_selection()->mutable_size()->set_min(strtoul(x.second.c_str(), 0, 10)); } else if (x.first == "max-size") { request.mutable_selection()->mutable_size()->set_max(strtoul(x.second.c_str(), 0, 10)); } else if (x.first == "min-children") { request.mutable_selection()->mutable_children()->set_min(strtoul( x.second.c_str(), 0, 10)); } else if (x.first == "max-children") { request.mutable_selection()->mutable_children()->set_max(strtoul( x.second.c_str(), 0, 10)); } else if (x.first == "zero-children") { request.mutable_selection()->mutable_children()->set_zero(strtoul( x.second.c_str(), 0, 10) ? true : false); } else if (x.first == "min-locations") { request.mutable_selection()->mutable_locations()->set_min(strtoul( x.second.c_str(), 0, 10)); } else if (x.first == "max-locations") { request.mutable_selection()->mutable_locations()->set_max(strtoul( x.second.c_str(), 0, 10)); } else if (x.first == "zero-locations") { request.mutable_selection()->mutable_locations()->set_zero(strtoul( x.second.c_str(), 0, 10) ? 
true : false); } else if (x.first == "min-unlinked_locations") { request.mutable_selection()->mutable_unlinked_locations()->set_min(strtoul( x.second.c_str(), 0, 10)); } else if (x.first == "max-unlinked_locations") { request.mutable_selection()->mutable_unlinked_locations()->set_max(strtoul( x.second.c_str(), 0, 10)); } else if (x.first == "zero-unlinked_locations") { request.mutable_selection()->mutable_unlinked_locations()->set_zero(strtoul( x.second.c_str(), 0, 10) ? true : false); } else if (x.first == "min-treesize") { request.mutable_selection()->mutable_treesize()->set_min(strtoul( x.second.c_str(), 0, 10)); } else if (x.first == "max-treesize") { request.mutable_selection()->mutable_treesize()->set_max(strtoul( x.second.c_str(), 0, 10)); } else if (x.first == "zero-treesize") { request.mutable_selection()->mutable_treesize()->set_zero(strtoul( x.second.c_str(), 0, 10) ? true : false); } else if (x.first == "min-ctime") { request.mutable_selection()->mutable_ctime()->set_min(strtoul(x.second.c_str(), 0, 10)); } else if (x.first == "max-ctime") { request.mutable_selection()->mutable_ctime()->set_max(strtoul(x.second.c_str(), 0, 10)); } else if (x.first == "zero-ctime") { request.mutable_selection()->mutable_ctime()->set_zero(strtoul(x.second.c_str(), 0, 10) ? true : false); } else if (x.first == "min-mtime") { request.mutable_selection()->mutable_mtime()->set_min(strtoul(x.second.c_str(), 0, 10)); } else if (x.first == "max-mtime") { request.mutable_selection()->mutable_mtime()->set_max(strtoul(x.second.c_str(), 0, 10)); } else if (x.first == "zero-mtime") { request.mutable_selection()->mutable_mtime()->set_zero(strtoul(x.second.c_str(), 0, 10) ? 
true : false); } else if (x.first == "min-stime") { request.mutable_selection()->mutable_stime()->set_min(strtoul(x.second.c_str(), 0, 10)); } else if (x.first == "max-stime") { request.mutable_selection()->mutable_stime()->set_max(strtoul(x.second.c_str(), 0, 10)); } else if (x.first == "zero-stime") { request.mutable_selection()->mutable_stime()->set_zero(strtoul(x.second.c_str(), 0, 10) ? true : false); } else if (x.first == "layoutid") { request.mutable_selection()->set_layoutid(strtoull(x.second.c_str(), 0, 10)); } else if (x.first == "flags") { request.mutable_selection()->set_flags(strtoull(x.second.c_str(), 0, 10)); } else if (x.first == "symlink") { request.mutable_selection()->set_symlink(strtoul(x.second.c_str(), 0, 10) ? true : false); } else if (x.first == "checksum-type") { request.mutable_selection()->mutable_checksum()->set_type(x.second); } else if (x.first == "checksum-value") { request.mutable_selection()->mutable_checksum()->set_value(x.second); } else if (x.first == "xattr") { std::string key; std::string val; eos::common::StringConversion::SplitKeyValue(x.second, key, val, "="); (*(request.mutable_selection()->mutable_xattr()))[key] = val; } else { std::cerr << "error: unknown filter '" << x.first << ":" << x.second << "'" << std::endl; return ""; } } } MDResponse response; ClientContext context; std::string responsestring; CompletionQueue cq; Status status; std::unique_ptr > rpc( stub_->AsyncFind(&context, request, &cq, (void*) 1)); void* got_tag; bool ok = false; bool ret = cq.Next(&got_tag, &ok); while (1) { rpc->Read(&response, (void*) 1); ok = false; ret = cq.Next(&got_tag, &ok); if (!ret || !ok || got_tag != (void*) 1) { break; } if (!exportfs.empty()) { responsestring = ExportFs(response, exportfs); } else { google::protobuf::util::JsonPrintOptions options; options.add_whitespace = true; #if GOOGLE_PROTOBUF_VERSION >= 5027000 options.always_print_fields_with_no_presence = true; #else options.always_print_primitive_fields = true; #endif 
std::string jsonstring; (void) google::protobuf::util::MessageToJsonString(response, &jsonstring, options); if (printonly) { std::cout << jsonstring << std::endl; } else { responsestring += jsonstring; } } } if (!status.ok()) { std::cerr << "error: " << status.error_message() << std::endl; } return responsestring; } int GrpcClient::FileInsert(const std::vector& paths) { FileInsertRequest request; size_t cnt = 0; for (auto it : paths) { std::string path = it; struct timespec tsnow; eos::common::Timing::GetTimeSpec(tsnow); uint64_t inode = 0; cnt++; FileMdProto* file = request.add_files(); if (it.substr(0, 4) == "ino:") { // the format is ino:xxxxxxxxxxxxxxxx: where xxxxxxxxxxxxxxxx is a 64bit hex string of the inode path = it.substr(21); inode = std::strtol(it.substr(4, 20).c_str(), 0, 16); } if (inode) { file->set_id(inode); } file->set_path(path); file->set_uid(2); file->set_gid(2); file->set_size(cnt); file->set_layout_id(0x00100002); file->mutable_checksum()->set_value("\0\0\0\1", 4); file->set_flags(0); file->mutable_ctime()->set_sec(tsnow.tv_sec); file->mutable_ctime()->set_n_sec(tsnow.tv_nsec); file->mutable_mtime()->set_sec(tsnow.tv_sec); file->mutable_mtime()->set_n_sec(tsnow.tv_nsec); file->mutable_locations()->Add(65535); auto map = file->mutable_xattrs(); (*map)["sys.acl"] = "u:100:rwx"; (*map)["sys.cta.id"] = "fake"; } request.set_authkey(token()); InsertReply reply; ClientContext context; // The producer-consumer queue we use to communicate asynchronously with the // gRPC runtime. CompletionQueue cq; Status status; std::unique_ptr > rpc( stub_->AsyncFileInsert(&context, request, &cq)); // Request that, upon completion of the RPC, "reply" be updated with the // server's response; "status" with the indication of whether the operation // was successful. Tag the request with the integer 1. rpc->Finish(&reply, &status, (void*) 1); void* got_tag; bool ok = false; // Block until the next result is available in the completion queue "cq". 
// The return value of Next should always be checked. This return value // tells us whether there is any kind of event or the cq_ is shutting down. ABSL_CHECK(cq.Next(&got_tag, &ok)); // Verify that the result from "cq" corresponds, by its tag, our previous // request. ABSL_CHECK(got_tag == (void*) 1); // ... and that the request was completed successfully. Note that "ok" // corresponds solely to the request for updates introduced by Finish(). ABSL_CHECK(ok); // Act upon the status of the actual RPC. int retc = 0; if (status.ok()) { for (auto it : reply.retc()) { retc |= it; } return retc; } else { return -1; } } int GrpcClient::ContainerInsert(const std::vector& paths) { ContainerInsertRequest request; for (auto it : paths) { std::string path; struct timespec tsnow; eos::common::Timing::GetTimeSpec(tsnow); uint64_t inode = 0 ; if (it.substr(0, 4) == "ino:") { // the format is ino:xxxxxxxxxxxxxxxx: where xxxxxxxxxxxxxxxx is a 64bit hex string of the inode path = it.substr(21); inode = std::strtol(it.substr(4, 20).c_str(), 0, 16); } ContainerMdProto* container = request.add_container(); if (inode) { container->set_id(inode); } container->set_path(path); container->set_uid(2); container->set_gid(2); container->set_mode(S_IFDIR | S_IRWXU); container->mutable_ctime()->set_sec(tsnow.tv_sec); container->mutable_ctime()->set_n_sec(tsnow.tv_nsec); container->mutable_mtime()->set_sec(tsnow.tv_sec); container->mutable_mtime()->set_n_sec(tsnow.tv_nsec); auto map = container->mutable_xattrs(); (*map)["sys.acl"] = "u:100:rwx"; (*map)["sys.forced.checksum"] = "adler"; (*map)["sys.forced.space"] = "default"; (*map)["sys.forced.nstripes"] = "1"; (*map)["sys.forced.layout"] = "replica"; } request.set_authkey(token()); InsertReply reply; ClientContext context; // The producer-consumer queue we use to communicate asynchronously with the // gRPC runtime. 
CompletionQueue cq; Status status; std::unique_ptr > rpc( stub_->AsyncContainerInsert(&context, request, &cq)); // Request that, upon completion of the RPC, "reply" be updated with the // server's response; "status" with the indication of whether the operation // was successful. Tag the request with the integer 1. rpc->Finish(&reply, &status, (void*) 1); void* got_tag; bool ok = false; // Block until the next result is available in the completion queue "cq". // The return value of Next should always be checked. This return value // tells us whether there is any kind of event or the cq_ is shutting down. ABSL_CHECK(cq.Next(&got_tag, &ok)); // Verify that the result from "cq" corresponds, by its tag, our previous // request. ABSL_CHECK(got_tag == (void*) 1); // ... and that the request was completed successfully. Note that "ok" // corresponds solely to the request for updates introduced by Finish(). ABSL_CHECK(ok); // Act upon the status of the actual RPC. int retc = 0; if (status.ok()) { for (auto it : reply.retc()) { retc |= it; } return retc; } else { return -1; } } std::unique_ptr GrpcClient::Create(std::string endpoint, std::string token, std::string keyfile, std::string certfile, std::string cafile, bool force_ssl) { std::string key; std::string cert; std::string ca; bool ssl_cred = false; if (keyfile.length() || certfile.length() || cafile.length()) { if (!keyfile.length() || !certfile.length() || !cafile.length()) { return 0; } force_ssl = true; ssl_cred = true; if (eos::common::StringConversion::LoadFileIntoString(certfile.c_str(), cert) && !cert.length()) { fprintf(stderr, "error: unable to load ssl certificate file '%s'\n", certfile.c_str()); return 0; } if (eos::common::StringConversion::LoadFileIntoString(keyfile.c_str(), key) && !key.length()) { fprintf(stderr, "unable to load ssl key file '%s'\n", keyfile.c_str()); return 0; } if (eos::common::StringConversion::LoadFileIntoString(cafile.c_str(), ca) && !ca.length()) { fprintf(stderr, "unable to load 
ssl ca file '%s'\n", cafile.c_str()); return 0; } } grpc::SslCredentialsOptions opts; if (ssl_cred) { opts.pem_root_certs = ca; opts.pem_private_key = key; opts.pem_cert_chain = cert; } std::unique_ptr p(new eos::client::GrpcClient( grpc::CreateChannel( endpoint, (force_ssl ? grpc::SslCredentials(opts) : grpc::InsecureChannelCredentials())))); p->set_token(token); return p; } int GrpcClient::NsStat(const eos::rpc::NsStatRequest& request, eos::rpc::NsStatResponse& reply) { ClientContext context; CompletionQueue cq; Status status; std::unique_ptr> rpc( stub_->AsyncNsStat(&context, request, &cq)); rpc->Finish(&reply, &status, (void*) 1); void* got_tag; bool ok = false; ABSL_CHECK(cq.Next(&got_tag, &ok)); ABSL_CHECK(got_tag == (void*) 1); ABSL_CHECK(ok); // Act upon the status of the actual RPC if (status.ok()) { return reply.code(); } else { return -1; } } int GrpcClient::Exec(const eos::rpc::NSRequest& request, eos::rpc::NSResponse& reply) { ClientContext context; CompletionQueue cq; Status status; std::unique_ptr > rpc( stub_->AsyncExec(&context, request, &cq)); rpc->Finish(&reply, &status, (void*) 1); void* got_tag; bool ok = false; ABSL_CHECK(cq.Next(&got_tag, &ok)); ABSL_CHECK(got_tag == (void*) 1); ABSL_CHECK(ok); // Act upon the status of the actual RPC. 
if (status.ok()) { return reply.error().code(); } else { return -1; } } std::string GrpcClient::ExportFs(const eos::rpc::MDResponse& response, const std::string& exportfs) { bool first = false; if (response.type() == eos::rpc::CONTAINER) { if (!tree.size()) { first = true; tree[response.cmd().id()] = response.cmd().name() + "/"; } else { first = false; tree[response.cmd().id()] = tree[response.cmd().parent_id()] + response.cmd().name() + "/"; } fprintf(stderr, "%s\n", tree[response.cmd().id()].c_str()); if (!first) { std::string target = exportfs + "/" + tree[response.cmd().id()]; eos::common::Path cPath(target.c_str()); if (!cPath.MakeParentPath(755)) { fprintf(stderr, "error: failed to created '%s'\n", cPath.GetParentPath()); exit(errno); } int rc = mkdir(cPath.GetPath(), 755); if (rc) { fprintf(stderr, "error: failed to created '%s'\n", cPath.GetPath()); exit(errno); } } } if (response.type() == eos::rpc::FILE) { fprintf(stderr, "%s\n", (tree[response.fmd().cont_id()] + response.fmd().name()).c_str()); } return ""; } EOSCLIENTNAMESPACE_END #endif ================================================ FILE: client/grpc/GrpcClient.hh ================================================ // ---------------------------------------------------------------------- // File: GrpcClient.hh // Author: Andreas-Joachim Peters - CERN // ---------------------------------------------------------------------- /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2018 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. 
* * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ #pragma once #include "client/Namespace.hh" #include "common/AssistedThread.hh" #include #ifdef EOS_GRPC #include #include "proto/Rpc.grpc.pb.h" EOSCLIENTNAMESPACE_BEGIN /** * @file GrpcClient.hh * * @brief This class implements a gRPC client for an EOS grpc server * */ class GrpcClient { public: explicit GrpcClient(std::shared_ptr channel) : stub_(eos::rpc::Eos::NewStub(channel)) { } // convenience factory function static std::unique_ptr Create(std::string endpoint = "localhost:50051", std::string token = "", std::string keyfile = "", std::string certfile = "", std::string cafile = "", bool force_ssl = false); std::string Ping(const std::string& payload); std::string Md(const std::string& path, uint64_t id = 0, uint64_t ino = 0, bool list = false, bool printonly = false); std::string Find(const std::string& path, const std::list& find_options, uint64_t id = 0, uint64_t ino = 0, bool files = true, bool dirs = true, uint64_t depth = 0, bool printonly = false, const std::string& exportfs = ""); int NsStat(const eos::rpc::NsStatRequest& request, eos::rpc::NsStatResponse& reply); int Exec(const eos::rpc::NSRequest& request, eos::rpc::NSResponse& reply); std::string ExportFs(const eos::rpc::MDResponse& response, const std::string& exportfs); int FileInsert(const std::vector& paths); int ContainerInsert(const std::vector& paths); void set_token(const std::string& _token) { mToken = _token; } std::string token() const { return mToken; } private: std::unique_ptr stub_; std::string mToken; std::map tree; }; EOSCLIENTNAMESPACE_END #endif 
================================================ FILE: client/grpc/GrpcClientAuthProcessor.hh ================================================ // ---------------------------------------------------------------------- // File: GrpcClientAuthProcessor.hh // Author: Andreas-Joachim Peters - CERN // ---------------------------------------------------------------------- /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2018 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. 
If not, see .* ************************************************************************/ #pragma once /*----------------------------------------------------------------------------*/ #include "client/Namespace.hh" /*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/ #ifdef EOS_GRPC #include /*----------------------------------------------------------------------------*/ EOSCLIENTNAMESPACE_BEGIN /** * @file GrpcClientAuthProcessor.hh * * @brief This class implements an authentication processor for an EOS GRPC client * allowing to extract the client property name * */ class GrpcClientProcessor : public grpc::AuthMetadataProcessor { public: struct Const { static const std::string& TokenKeyName() { static std::string _("token"); return _; } static const std::string& PeerIdentityPropertyName() { static std::string _("username"); return _; } }; grpc::Status Process(const InputMetadata& auth_metadata, grpc::AuthContext* context, OutputMetadata* consumed_auth_metadata, OutputMetadata* response_metadata) override { // determine intercepted method std::string dispatch_keyname = ":path"; auto dispatch_kv = auth_metadata.find(dispatch_keyname); if (dispatch_kv == auth_metadata.end()) { return grpc::Status(grpc::StatusCode::INTERNAL, "Internal Error"); } // if token metadata not necessary, return early, avoid token checking auto dispatch_value = std::string(dispatch_kv->second.data()); if (dispatch_value == "/MyPackage.MyService/Authenticate") { return grpc::Status::OK; } // determine availability of token metadata auto token_kv = auth_metadata.find(Const::TokenKeyName()); if (token_kv == auth_metadata.end()) { return grpc::Status(grpc::StatusCode::UNAUTHENTICATED, "Missing Token"); } // determine validity of token metadata auto token_value = std::string(token_kv->second.data()); if (tokens.count(token_value) == 0) { return grpc::Status(grpc::StatusCode::UNAUTHENTICATED, 
"Invalid Token"); } // once verified, mark as consumed and store user for later retrieval consumed_auth_metadata->insert(std::make_pair(Const::TokenKeyName(), token_value)); // required context->AddProperty(Const::PeerIdentityPropertyName(), tokens[token_value]); // optional context->SetPeerIdentityPropertyName( Const::PeerIdentityPropertyName()); // optional return grpc::Status::OK; } std::map tokens; }; #endif EOSCLIENTNAMESPACE_END ================================================ FILE: client/grpc/Insert.cc ================================================ #include #include #include #include "client/grpc/GrpcClient.hh" #include #include "common/StringConversion.hh" int usage(const char* prog) { fprintf(stderr, "usage: %s [--key " "--cert " "--ca ] " "[--endpoint ] [--token ] " "[--prefix prefix] " "[--treefile ] " "[--force-ssl] \n", prog); fprintf(stderr, "treefile format providing inodes: \n" "----------------------------------\n" "ino:000000000000ffff:/eos/mydir/\n" "ino:000000000000ff01:/eos/mydir/myfile\n\n"); fprintf(stderr, "treefile format without inodes: \n" "----------------------------------\n" "/eos/mydir/\n" "/eos/mydir/myfile\n\n"); return -1; } int main(int argc, const char* argv[]) { std::string endpoint = "localhost:50051"; std::string token = ""; std::string key; std::string cert; std::string ca; std::string keyfile; std::string certfile; std::string cafile; std::string prefix = "/grpc"; std::string treefile = "namespace.txt"; bool force_ssl = false; for (auto i = 1; i < argc; ++i) { std::string option = argv[i]; if (option == "--key") { if (argc > i + 1) { keyfile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--cert") { if (argc > i + 1) { certfile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--ca") { if (argc > i + 1) { cafile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--endpoint") { if (argc > i + 1) { endpoint = argv[i + 1]; ++i; 
continue; } else { return usage(argv[0]); } } if (option == "--token") { if (argc > i + 1) { token = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--prefix") { if (argc > i + 1) { prefix = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--treefile") { if (argc > i + 1) { treefile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--force-ssl") { force_ssl = true; continue; } return usage(argv[0]); } if (keyfile.length() || certfile.length() || cafile.length()) { if (!keyfile.length() || !certfile.length() || !cafile.length()) { return usage(argv[0]); } } std::unique_ptr eosgrpc = eos::client::GrpcClient::Create( endpoint, token, keyfile, certfile, cafile, force_ssl); if (!eosgrpc) { return usage(argv[0]); } std::cout << "=> settings: prefix=" << prefix << " treefile=" << treefile << std::endl; std::ifstream input(treefile); size_t n = 0; size_t bulk = 1000; bool dirmode = true; std::vector paths; std::chrono::steady_clock::time_point watch_global = std::chrono::steady_clock::now(); for (std::string line ; std::getline(input, line);) { n++; if (line.substr(0, 4) == "ino:") { line.insert(21, prefix); } else { line.insert(0, prefix); } std::cout << n << " " << line << std::endl; if (line.back() == '/') { // dir if (dirmode) { paths.push_back(line); } else { // SEND OFF DIRS int retc = eosgrpc->FileInsert(paths); std::cout << "::send::files" << " retc=" << retc << std::endl; paths.clear(); paths.push_back(line); dirmode = true; } } else { // file if (dirmode) { // SEND OFF FILES int retc = eosgrpc->ContainerInsert(paths); std::cout << "::send::dirs " << " retc=" << retc << std::endl; paths.clear(); paths.push_back(line); dirmode = false; } else { paths.push_back(line); } } if (paths.size() >= bulk) { if (dirmode) { // SEND OF DIRS int retc = eosgrpc->ContainerInsert(paths); std::cout << "::send::dirs" << " retc=" << retc << std::endl; paths.clear(); } else { // SEND OF FILES 
int retc = eosgrpc->FileInsert(paths); std::cout << "::send::files" << " retc=" << retc << std::endl; paths.clear(); } } } std::chrono::microseconds elapsed_global = std::chrono::duration_cast (std::chrono::steady_clock::now() - watch_global); std::cout << n << " requests took " << elapsed_global.count() << " micro seconds" << std::endl; return 0; } ================================================ FILE: client/grpc/Md.cc ================================================ #include #include #include "client/grpc/GrpcClient.hh" #include #include "common/StringConversion.hh" int usage(const char* prog) { fprintf(stderr, "usage: %s [--key " "--cert " "--ca ] " "[--endpoint ] [--token ] [-l] [--force-ssl] \n", prog); return -1; } int main(int argc, const char* argv[]) { std::string endpoint = "localhost:50051"; std::string token = ""; std::string key; std::string cert; std::string ca; std::string keyfile; std::string certfile; std::string cafile; std::string path = ""; bool listing = false; bool force_ssl = false; for (auto i = 1; i < argc; ++i) { std::string option = argv[i]; if (option == "--key") { if (argc > i + 1) { keyfile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--cert") { if (argc > i + 1) { certfile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--ca") { if (argc > i + 1) { cafile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--endpoint") { if (argc > i + 1) { endpoint = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--token") { if (argc > i + 1) { token = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "-l") { listing = true; continue; } if (option == "--force-ssl") { force_ssl = true; continue; } path = option; if (argc > (i + 1)) { return usage(argv[0]); } } if (keyfile.length() || certfile.length() || cafile.length()) { if (!keyfile.length() || !certfile.length() || !cafile.length()) { return 
usage(argv[0]); } } if (path.empty()) { return usage(argv[0]); } std::unique_ptr eosgrpc = eos::client::GrpcClient::Create( endpoint, token, keyfile, certfile, cafile, force_ssl); if (!eosgrpc) { return usage(argv[0]); } std::chrono::steady_clock::time_point watch_global = std::chrono::steady_clock::now(); std::string reply = eosgrpc->Md(path, 0, 0, listing, true); std::chrono::microseconds elapsed_global = std::chrono::duration_cast (std::chrono::steady_clock::now() - watch_global); std::cout << "request took " << elapsed_global.count() << " micro seconds" << std::endl; return 0; } ================================================ FILE: client/grpc/Ns.cc ================================================ #include #include #include "client/grpc/GrpcClient.hh" #include #include "common/StringConversion.hh" #include "proto/Recycle.pb.h" #include "console/commands/helpers/RecycleHelper.hh" #include int usage(const char* prog) { fprintf(stderr, "usage: %s [--key " "--cert " "--ca ] " "--ca ] " "[--endpoint ] [--token ] [--xattr ] [--mode ] [--username ] [ [--groupname ] [--uid ] [--gid ] [--app ] [--owner-uid ] [--owner-gid ] [--acl ] [--sysacl] [--norecycle] [--force-ssl] [-r] [--max-version ] [--target ] [--year ] [--month ] [--day ] [--inodes <#>] [--volume <#>] [--quota volume|inode] [--position ] [--front] -p \n", prog); fprintf(stderr, " [-r] -p mkdir\n" " [-r] -p rmdir\n" " -p touch\n" " [--norecycle] -p rm\n" " --target -p rename\n" " --target -p symlink\n" " [-r] --xattr -p setxattr # sets key=val\n" " [-r] --xattr -p setxattr # deletes key\n" " --owner-uid --owner-gid -p chown\n" " --mode -p chmod\n" " [--sysacl] [-r] [--acl ] [--position ] [--front] -p acl\n" " --ztoken | [--acl] [-r] -p token\n" " [--max-version -p create-version\n" " -p list-version\n" " [--max-version -p purge-version\n" " [--max-version -p purge-version\n" " [--max-version -p purge-version\n" " old_recycle ls\n" " -p old_recycle restore\n" " --year [--month [--day ]] old_recycle purge\n" " 
-p old_recycle purge\n" " recycle ls [ []] [-m] [-n] [--all] [--rid ]\n" " recycle purge [--all] [--uid] [--rid ] | -k ]\n" " recycle restore [-p] [-f|--force-original-name] [-r|--restore-versions] \n" " recycle project --path [--acl ]\n" " recycle config [--add-bin|--remove-bin ] [--lifetime ] [--ratio ] [--size ] [--inodes ] [--collect-interval ] [--remove-interval ] [--dry-run ] [--dump]\n" "[--username | --groupname ] [-p ] quota get\n" "[--username | --groupname ] [-p ] --inodes <#> --volume <#> --quota user|group|project quota set\n" "[--username | --groupname ] [-p ] quota rm\n" " [-p ] quota rmnode\n"); return -1; } int ParseRecycleCommand(int argc, const char* argv[], int arg_index, std::string& subcmd, std::string& path, eos::rpc::NSRequest& request) { std::string command_line; if (subcmd.empty()) { command_line = subcmd; } else { for (int i = arg_index; i < argc; i++) { command_line += argv[i]; command_line += " "; } // Remove trailing space command_line.pop_back(); } GlobalOptions opts; RecycleHelper recycle_helper(opts); if (recycle_helper.ParseCommand(command_line.c_str())) { request.mutable_recycle()->CopyFrom(recycle_helper.GetRequest().recycle()); return 0; } else { std::cerr << "error: failed to parse recycle command " << command_line << std::endl; return EINVAL; } } int main(int argc, const char* argv[]) { std::string endpoint = "localhost:50051"; std::string token = ""; std::string key; std::string cert; std::string ca; std::string keyfile; std::string certfile; std::string cafile; std::string cmd = ""; std::string subcmd = ""; std::string path = ""; std::string target = ""; std::string xattr = ""; std::string acl = ""; mode_t mode = 0775; int64_t max_version = -1; uid_t uid = 0; gid_t gid = 0; std::string app; uint32_t day = 0; uint32_t month = 0; uint32_t year = 0; uint64_t inodes = 0; uint64_t volume = 0; std::string qtype; std::string username; std::string groupname; uid_t owner_uid = 0; gid_t owner_gid = 0; bool recursive = false; bool 
norecycle = false; bool sysacl = false; uint32_t position = 0; std::string eostoken = ""; bool force_ssl = false; int arg_index = 0; for (auto i = 1; i < argc; ++i) { std::string option = argv[i]; if (option == "--key") { if (argc > i + 1) { keyfile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--cert") { if (argc > i + 1) { certfile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--ca") { if (argc > i + 1) { cafile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--endpoint") { if (argc > i + 1) { endpoint = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--token") { if (argc > i + 1) { token = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--uid") { if (argc > i + 1) { uid = strtoul(argv[i + 1], 0, 10); ++i; continue; } else { return usage(argv[0]); } } if (option == "--gid") { if (argc > i + 1) { gid = strtoul(argv[i + 1], 0, 10); ++i; continue; } else { return usage(argv[0]); } } if (option == "--app") { if (argc > i + 1) { app = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--inodes") { if (argc > i + 1) { inodes = strtoul(argv[i + 1], 0, 10); ++i; continue; } else { return usage(argv[0]); } } if (option == "--volume") { if (argc > i + 1) { volume = strtoul(argv[i + 1], 0, 10); ++i; continue; } else { return usage(argv[0]); } } if (option == "--quota") { if (argc > i + 1) { qtype = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--username") { if (argc > i + 1) { username = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--year") { if (argc > i + 1) { year = strtoul(argv[i + 1], 0, 10); ++i; continue; } else { return usage(argv[0]); } } if (option == "--month") { if (argc > i + 1) { month = strtoul(argv[i + 1], 0, 10); ++i; continue; } else { return usage(argv[0]); } } if (option == "--day") { if (argc > i 
+ 1) { day = strtoul(argv[i + 1], 0, 10); ++i; continue; } else { return usage(argv[0]); } } if (option == "--groupname") { if (argc > i + 1) { groupname = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--owner-uid") { if (argc > i + 1) { owner_uid = strtoul(argv[i + 1], 0, 10); ++i; continue; } else { return usage(argv[0]); } } if (option == "--owner-gid") { if (argc > i + 1) { owner_gid = strtoul(argv[i + 1], 0, 10); ++i; continue; } else { return usage(argv[0]); } } if (option == "-p") { if (argc > i + 1) { path = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--target") { if (argc > i + 1) { target = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--acl") { if (argc > i + 1) { acl = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--position") { if (position) { std::cout << "Please specify only one of --front or --position" << std::endl; return usage(argv[0]); } if (argc > i + 1) { try { position = std::stoi(argv[i + 1]); ++i; } catch (std::exception& e) { return usage(argv[0]); } continue; } else { return usage(argv[0]); } } if (option == "--front") { if (position) { std::cout << "Please specify only one of --front or --position" << std::endl; return usage(argv[0]); } position = 1; continue; } if (option == "--mode") { if (argc > i + 1) { mode = strtol(argv[i + 1], 0, 8); ++i; continue; } else { return usage(argv[0]); } } if (option == "--max-version") { if (argc > i + 1) { max_version = strtol(argv[i + 1], 0, 10); ++i; continue; } else { return usage(argv[0]); } } if (option == "--xattr") { if (argc > i + 1) { xattr = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "-r") { recursive = true; continue; } if (option == "--sysacl") { sysacl = true; continue; } if (option == "--norecycle") { norecycle = true; continue; } if (option == "--ztoken") { if (argc > i + 1) { eostoken = argv[i + 1]; ++i; continue; } 
else { return usage(argv[0]); } } if (option == "--force-ssl") { force_ssl = true; continue; } cmd = option; if (argc > (i + 1)) { if (cmd == "old_recycle") { subcmd = argv[i + 1]; if ((subcmd != "ls") && (subcmd != "restore") && (subcmd != "purge")) { return usage(argv[0]); } break; } if (cmd == "recycle") { arg_index = i; subcmd = argv[i + 1]; if ((subcmd != "ls") && (subcmd != "restore") && (subcmd != "purge") && (subcmd != "config") && (subcmd != "project")) { return usage(argv[0]); } break; } if (cmd == "quota") { subcmd = argv[i + 1]; if ((subcmd != "get") && (subcmd != "set") && (subcmd != "rm") && (subcmd != "rmnode")) { return usage(argv[0]); } break; } return usage(argv[0]); } } if (keyfile.length() || certfile.length() || cafile.length()) { if (!keyfile.length() || !certfile.length() || !cafile.length()) { return usage(argv[0]); } } if (cmd.empty() || ((cmd != "quota") && (cmd != "old_recycle") && (cmd != "recycle") && path.empty() && eostoken.empty())) { return usage(argv[0]); } std::unique_ptr eosgrpc = eos::client::GrpcClient::Create( endpoint, token, keyfile, certfile, cafile, force_ssl); if (!eosgrpc) { return usage(argv[0]); } std::chrono::steady_clock::time_point watch_global = std::chrono::steady_clock::now(); eos::rpc::NSRequest request; eos::rpc::NSResponse reply; request.set_authkey(token); if (uid) { request.mutable_role()->set_uid(uid); } if (gid) { request.mutable_role()->set_gid(gid); } if (app.length()) { request.mutable_role()->set_app(app); } google::protobuf::util::JsonPrintOptions options; options.add_whitespace = true; #if GOOGLE_PROTOBUF_VERSION >= 5027000 options.always_print_fields_with_no_presence = true; #else options.always_print_primitive_fields = true; #endif std::string jsonstring; if (cmd == "mkdir") { request.mutable_mkdir()->mutable_id()->set_path(path); if (recursive) { request.mutable_mkdir()->set_recursive(true); } request.mutable_mkdir()->set_mode(mode); } else if (cmd == "rmdir") { 
request.mutable_rmdir()->mutable_id()->set_path(path); } else if (cmd == "touch") { request.mutable_touch()->mutable_id()->set_path(path); } else if (cmd == "unlink") { request.mutable_unlink()->mutable_id()->set_path(path); if (norecycle) { request.mutable_unlink()->set_norecycle(norecycle); } } else if (cmd == "rm") { request.mutable_rm()->mutable_id()->set_path(path); if (norecycle) { request.mutable_rm()->set_norecycle(norecycle); } if (recursive) { request.mutable_rm()->set_recursive(recursive); } } else if (cmd == "rename") { request.mutable_rename()->mutable_id()->set_path(path); request.mutable_rename()->set_target(target); } else if (cmd == "symlink") { request.mutable_symlink()->mutable_id()->set_path(path); request.mutable_symlink()->set_target(target); } else if (cmd == "setxattr") { request.mutable_xattr()->set_recursive(recursive); request.mutable_xattr()->mutable_id()->set_path(path); std::string key, val; eos::common::StringConversion::SplitKeyValue(xattr, key, val, "="); if (key.front() == '!') { // add as a deletion key auto x = request.mutable_xattr()->add_keystodelete(); *x = key.substr(1); } else { // add as a new attribute key (*(request.mutable_xattr()->mutable_xattrs()))[key] = val; } } else if (cmd == "chown") { // run as root request.mutable_chown()->mutable_id()->set_path(path); request.mutable_chown()->mutable_owner()->set_uid(owner_uid); request.mutable_chown()->mutable_owner()->set_gid(owner_gid); } else if (cmd == "chmod") { request.mutable_chmod()->mutable_id()->set_path(path); request.mutable_chmod()->set_mode(mode); } else if (cmd == "create-version") { request.mutable_version()->set_cmd(eos::rpc::NSRequest::VersionRequest::CREATE); request.mutable_version()->mutable_id()->set_path(path); request.mutable_version()->set_maxversion(max_version); } else if (cmd == "list-version") { request.mutable_version()->set_cmd(eos::rpc::NSRequest::VersionRequest::LIST); request.mutable_version()->mutable_id()->set_path(path); } else if (cmd == 
"purge-version") { request.mutable_version()->set_cmd(eos::rpc::NSRequest::VersionRequest::PURGE); request.mutable_version()->mutable_id()->set_path(path); request.mutable_version()->set_maxversion(max_version); } else if (cmd == "acl") { if (acl.empty()) { // list acl request.mutable_acl()->set_cmd(eos::rpc::NSRequest::AclRequest::LIST); } else { // modify acl request.mutable_acl()->set_cmd(eos::rpc::NSRequest::AclRequest::MODIFY); request.mutable_acl()->set_rule(acl); } request.mutable_acl()->mutable_id()->set_path(path); if (recursive) { request.mutable_acl()->set_recursive(true); } if (sysacl) { request.mutable_acl()->set_type(eos::rpc::NSRequest::AclRequest::SYS_ACL); } else { request.mutable_acl()->set_type(eos::rpc::NSRequest::AclRequest::USER_ACL); } if (position) { request.mutable_acl()->set_position(position); } } else if (cmd == "token") { request.mutable_token()->mutable_token()->mutable_token()->set_expires(time( NULL) + 300); if (!path.empty()) { request.mutable_token()->mutable_token()->mutable_token()->set_path(path); } if (recursive) { request.mutable_token()->mutable_token()->mutable_token()->set_allowtree(true); } if (acl.empty()) { request.mutable_token()->mutable_token()->mutable_token()->set_permission("rx"); } else { request.mutable_token()->mutable_token()->mutable_token()->set_permission(acl); } if (!eostoken.empty()) { request.mutable_token()->mutable_token()->mutable_token()->set_vtoken(eostoken); } } else if (cmd == "quota") { if (username.length()) { request.mutable_quota()->mutable_id()->set_username(username); } if (groupname.length()) { request.mutable_quota()->mutable_id()->set_groupname(groupname); } request.mutable_quota()->set_path(path); if (subcmd == "get") { request.mutable_quota()->set_op(eos::rpc::GET); } if (subcmd == "set") { request.mutable_quota()->set_op(eos::rpc::SET); request.mutable_quota()->set_maxfiles(inodes); request.mutable_quota()->set_maxbytes(volume); } if (subcmd == "rm") { 
request.mutable_quota()->set_op(eos::rpc::RM); request.mutable_quota()->set_entry(eos::rpc::NONE); if (qtype == "volume") { request.mutable_quota()->set_entry(eos::rpc::VOLUME); } if (qtype == "inode") { request.mutable_quota()->set_entry(eos::rpc::INODE); } } if (subcmd == "rmnode") { request.mutable_quota()->set_op(eos::rpc::RMNODE); } } else if (cmd == "old_recycle") { if ((subcmd == "") || (subcmd == "ls")) { request.mutable_old_recycle()->set_cmd( eos::rpc::NSRequest::RecycleRequest::LIST); } else if (subcmd == "purge") { if (year) { request.mutable_old_recycle()->mutable_purgedate()->set_year(year); } if (month) { request.mutable_old_recycle()->mutable_purgedate()->set_month(month); } if (day) { request.mutable_old_recycle()->mutable_purgedate()->set_day(day); } request.mutable_old_recycle()->set_key(path); request.mutable_old_recycle()->set_cmd( eos::rpc::NSRequest::RecycleRequest::PURGE); } else if (subcmd == "restore") { request.mutable_old_recycle()->set_cmd( eos::rpc::NSRequest::RecycleRequest::RESTORE); request.mutable_old_recycle()->set_key(path); } else { std::cerr << "invalid recycle request" << std::endl; return EINVAL; } } else if (cmd == "recycle") { if (ParseRecycleCommand(argc, argv, arg_index + 1, subcmd, path, request)) { return EINVAL; } } (void) google::protobuf::util::MessageToJsonString(request, &jsonstring, options); std::cout << "request: " << std::endl << jsonstring << std::endl; int retc = EIO; if (eosgrpc->Exec(request, reply)) { std::cerr << "grpc request failed" << std::endl; } else { retc = reply.error().code(); } jsonstring = ""; (void) google::protobuf::util::MessageToJsonString(reply, &jsonstring, options); std::cout << "reply: " << std::endl << jsonstring << std::endl; std::chrono::microseconds elapsed_global = std::chrono::duration_cast (std::chrono::steady_clock::now() - watch_global); std::cout << "request took " << elapsed_global.count() << " micro seconds" << std::endl; return retc; } 
================================================ FILE: client/grpc/NsStat.cc ================================================ /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2018 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ #include #include #include #include #include "client/grpc/GrpcClient.hh" int usage(const char* name) { std::ostringstream oss; oss << "usage: " << name << " [--key --cert --ca ]" << " [--token ]" << std::endl << std::setw(strlen(name) + 8) << "" << "[--endpoint ] [-d|--debug] [-h|--help] [--force-ssl]" << std::endl; std::cerr << oss.str(); return -1; } int main(int argc, char* argv[]) { using eos::client::GrpcClient; std::string endpoint{"localhost:50051"}; std::string keyfile; std::string certfile; std::string cafile; std::string token; bool debug = false; bool force_ssl = false; while (true) { static struct option long_options[] { {"key", required_argument, 0, 'k'}, {"cert", required_argument, 0, 'c'}, {"ca", required_argument, 0, 'a'}, {"endpoint", required_argument, 0, 'e'}, {"token", required_argument, 0, 't'}, {"debug", no_argument, 0, 'd'}, {"help", no_argument, 0, 'h'}, {"force-ssl", no_argument, 0, 's'}, {0, 0, 0, 0} }; int option_index = 0; int c = getopt_long(argc, argv, "k:c:a:e:t:dhs", 
long_options, &option_index); // Detect end of the options if (c == -1) { break; } switch (c) { case 'k': keyfile = optarg; break; case 'c': certfile = optarg; break; case 'a': cafile = optarg; break; case 'e': endpoint = optarg; break; case 't': token = optarg; break; case 'd': debug = true; break; case 's': force_ssl = true; break; case 'h': return usage(argv[0]); default: return usage(argv[0]); } } // Make sure all elements are present if certificate authentication is used if (keyfile.length() || certfile.length() || cafile.length()) { if (!keyfile.length() || !certfile.length() || !cafile.length()) { return usage(argv[0]); } } std::unique_ptr eosgrpc = GrpcClient::Create(endpoint, token, keyfile, certfile, cafile, force_ssl); if (!eosgrpc) { std::cerr << "Failed to create grpc client object!" << std::endl; return -1; } auto start_time = std::chrono::steady_clock::now(); google::protobuf::util::JsonPrintOptions options; #if GOOGLE_PROTOBUF_VERSION >= 5027000 options.always_print_fields_with_no_presence = true; #else options.always_print_primitive_fields = true; #endif options.add_whitespace = true; std::string jsonstring; eos::rpc::NsStatRequest request; eos::rpc::NsStatResponse reply; request.set_authkey(token); if (debug) { (void) google::protobuf::util::MessageToJsonString(request, &jsonstring, options); std::cout << "request: " << std::endl << jsonstring << std::endl; } if (eosgrpc->NsStat(request, reply)) { std::cerr << "GRPC request field" << std::endl; debug = true; } if (debug) { std::cout << "reply: " << std::endl; } jsonstring = ""; (void) google::protobuf::util::MessageToJsonString(reply, &jsonstring, options); std::cout << jsonstring << std::endl; auto elapsed_time = std::chrono::duration_cast( std::chrono::steady_clock::now() - start_time); if (debug) { std::cout << "request took " << elapsed_time.count() << " microseconds" << std::endl; } return reply.code(); } ================================================ FILE: client/grpc/Ping.cc 
================================================ #include #include #include "client/grpc/GrpcClient.hh" #include #include "common/StringConversion.hh" int usage(const char* prog) { fprintf(stderr, "usage: %s [--size pingsize (max 4M)] \n" " [--key \n" " --cert \n" " --ca ]\n" " [--endpoint ]\n" " [--token ]\n" " [--force-ssl]\n", prog); return -1; } int main(int argc, const char* argv[]) { std::string endpoint = "localhost:50051"; std::string token = ""; std::string key; std::string cert; std::string ca; std::string keyfile; std::string certfile; std::string cafile; bool force_ssl = false; size_t ping_size = 0 ; for (auto i = 1; i < argc; ++i) { std::string option = argv[i]; if (option == "--key") { if (argc > i + 1) { keyfile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--cert") { if (argc > i + 1) { certfile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--ca") { if (argc > i + 1) { cafile = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--endpoint") { if (argc > i + 1) { endpoint = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--token") { if (argc > i + 1) { token = argv[i + 1]; ++i; continue; } else { return usage(argv[0]); } } if (option == "--size") { if (argc > i + 1) { ping_size = std::strtoul(argv[i + 1], 0, 10); ++i; continue; } else { return usage(argv[0]); } } if (option == "--force-ssl") { force_ssl = true; ++i; continue; } return usage(argv[0]); } if (keyfile.length() || certfile.length() || cafile.length()) { if (!keyfile.length() || !certfile.length() || !cafile.length()) { return usage(argv[0]); } } if (ping_size > (4 * 1000000)) { return usage(argv[0]); } std::unique_ptr eosgrpc = eos::client::GrpcClient::Create (endpoint, token, keyfile, certfile, cafile, force_ssl); if (!eosgrpc) { return usage(argv[0]); } std::string message("ping"); if (ping_size) { message.resize(ping_size); } 
std::chrono::steady_clock::time_point watch_global = std::chrono::steady_clock::now(); int n_requests = 100; for (auto i = 0; i < n_requests; ++i) { std::chrono::steady_clock::time_point watch_local = std::chrono::steady_clock::now(); std::string reply = eosgrpc->Ping(message); if (reply != message) { std::cout << "request: failed/timeout" << std::endl; } else { std::chrono::microseconds elapsed_local = std::chrono::duration_cast (std::chrono::steady_clock::now() - watch_local); std::cout << "request: " << message.length() << " reply: " << reply.length() << " timing: " << elapsed_local.count() << " micro seconds" << std::endl; } } std::chrono::microseconds elapsed_global = std::chrono::duration_cast (std::chrono::steady_clock::now() - watch_global); std::cout << n_requests << " requests took " << elapsed_global.count() << " micro seconds" << std::endl; return 0; } ================================================ FILE: cmake/CPUArchFlags.cmake ================================================ # ---------------------------------------------------------------------- # File: CMakeLists.txt # Author: Abhishek Lekshmanan # ---------------------------------------------------------------------- # ************************************************************************ # * EOS - the CERN Disk Storage System * # * Copyright (C) 2021 CERN/Switzerland * # * * # * This program is free software: you can redistribute it and/or modify * # * it under the terms of the GNU General Public License as published by * # * the Free Software Foundation, either version 3 of the License, or * # * (at your option) any later version. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU General Public License for more details. 
* # * * # * You should have received a copy of the GNU General Public License * # * along with this program. If not, see .* # ************************************************************************ include(CheckCXXCompilerFlag) if (CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64") # Bypass everything if NO_SSE is set set(AMD64_BUILD ON) if (NO_SSE) # Some old hardware does not have sse instructions support, allow switch-off. message(NOTICE "SSE extensions not enabled") CHECK_CXX_COMPILER_FLAG(-mcrc32 HAVE_CRC32) if (HAVE_CRC32) set(CPU_ARCH_FLAGS "-mcrc32") endif() # HAVE_CRC32 else() #find cpu features CHECK_CXX_COMPILER_FLAG(-msse4.2 HAVE_SSE42) if (HAVE_SSE42) set(CPU_ARCH_FLAGS "-msse4.2") endif() # HAVE_SSE42 CHECK_CXX_COMPILER_FLAG(-mavx512f HAVE_AVX512F) CHECK_CXX_COMPILER_FLAG(-mavx512vl HAVE_AVX512L) if(HAVE_AVX512F AND HAVE_AVX512L) set(HAVE_AVX512 1) endif() CHECK_CXX_COMPILER_FLAG(-mavx2 HAVE_AVX2) endif() # NO_SSE elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "arm64|aarch64") set(ARM64_BUILD ON) CHECK_CXX_COMPILER_FLAG(-march=armv8-a+crc+crypto HAVE_ARMV8_CRC_CRYPTO) CHECK_CXX_COMPILER_FLAG(-march=armv8-a+crc HAVE_ARMV8_CRC) if (HAVE_ARMV8_CRC_CRYPTO) set(CPU_ARCH_FLAGS "-march=armv8-a+crc+crypto") elseif (HAVE_ARMV8_CRC) set(CPU_ARCH_FLAGS "-march=armv8-a+crc") endif() # CRC/CRYPTO CHECK_CXX_COMPILER_FLAG(-mfpu=neon HAVE_ARM_NEON) else() message(WARNING "Could not determine platform. No cpu accel. will be used ") endif() # SYSTEM_PROCESSOR add_compile_options(${CPU_ARCH_FLAGS}) add_link_options(${CPU_ARCH_FLAGS}) ================================================ FILE: cmake/DownloadProject.CMakeLists.cmake.in ================================================ # Distributed under the OSI-approved MIT License. See accompanying # file LICENSE or https://github.com/Crascit/DownloadProject for details. 
cmake_minimum_required(VERSION 3.16...3.30 FATAL_ERROR) project(${DL_ARGS_PROJ}-download NONE) include(ExternalProject) ExternalProject_Add(${DL_ARGS_PROJ}-download ${DL_ARGS_UNPARSED_ARGUMENTS} SOURCE_DIR "${DL_ARGS_SOURCE_DIR}" BINARY_DIR "${DL_ARGS_BINARY_DIR}" CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" TEST_COMMAND "" ) ================================================ FILE: cmake/DownloadProject.cmake ================================================ # Distributed under the OSI-approved MIT License. See accompanying # file LICENSE or https://github.com/Crascit/DownloadProject for details. # # MODULE: DownloadProject # # PROVIDES: # download_project( PROJ projectName # [PREFIX prefixDir] # [DOWNLOAD_DIR downloadDir] # [SOURCE_DIR srcDir] # [BINARY_DIR binDir] # [QUIET] # ... # ) # # Provides the ability to download and unpack a tarball, zip file, git repository, # etc. at configure time (i.e. when the cmake command is run). How the downloaded # and unpacked contents are used is up to the caller, but the motivating case is # to download source code which can then be included directly in the build with # add_subdirectory() after the call to download_project(). Source and build # directories are set up with this in mind. # # The PROJ argument is required. The projectName value will be used to construct # the following variables upon exit (obviously replace projectName with its actual # value): # # projectName_SOURCE_DIR # projectName_BINARY_DIR # # The SOURCE_DIR and BINARY_DIR arguments are optional and would not typically # need to be provided. They can be specified if you want the downloaded source # and build directories to be located in a specific place. The contents of # projectName_SOURCE_DIR and projectName_BINARY_DIR will be populated with the # locations used whether you provide SOURCE_DIR/BINARY_DIR or not. # # The DOWNLOAD_DIR argument does not normally need to be set. 
It controls the # location of the temporary CMake build used to perform the download. # # The PREFIX argument can be provided to change the base location of the default # values of DOWNLOAD_DIR, SOURCE_DIR and BINARY_DIR. If all of those three arguments # are provided, then PREFIX will have no effect. The default value for PREFIX is # CMAKE_BINARY_DIR. # # The QUIET option can be given if you do not want to show the output associated # with downloading the specified project. # # In addition to the above, any other options are passed through unmodified to # ExternalProject_Add() to perform the actual download, patch and update steps. # The following ExternalProject_Add() options are explicitly prohibited (they # are reserved for use by the download_project() command): # # CONFIGURE_COMMAND # BUILD_COMMAND # INSTALL_COMMAND # TEST_COMMAND # # Only those ExternalProject_Add() arguments which relate to downloading, patching # and updating of the project sources are intended to be used. Also note that at # least one set of download-related arguments are required. # # If using CMake 3.2 or later, the UPDATE_DISCONNECTED option can be used to # prevent a check at the remote end for changes every time CMake is run # after the first successful download. See the documentation of the ExternalProject # module for more information. It is likely you will want to use this option if it # is available to you. Note, however, that the ExternalProject implementation contains # bugs which result in incorrect handling of the UPDATE_DISCONNECTED option when # using the URL download method or when specifying a SOURCE_DIR with no download # method. Fixes for these have been created, the last of which is scheduled for # inclusion in CMake 3.8.0. 
# Details can be found here:
#
#   https://gitlab.kitware.com/cmake/cmake/commit/bdca68388bd57f8302d3c1d83d691034b7ffa70c
#   https://gitlab.kitware.com/cmake/cmake/issues/16428
#
# If you experience build errors related to the update step, consider avoiding
# the use of UPDATE_DISCONNECTED.
#
# EXAMPLE USAGE:
#
#   include(DownloadProject)
#   download_project(PROJ           googletest
#                    GIT_REPOSITORY https://github.com/google/googletest.git
#                    GIT_TAG        master
#                    UPDATE_DISCONNECTED 1
#                    QUIET
#   )
#
#   add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR})
#
#========================================================================================

# Directory containing DownloadProject.CMakeLists.cmake.in (this module's dir)
set(_DownloadProjectDir "${CMAKE_CURRENT_LIST_DIR}")

include(CMakeParseArguments)

# Download/unpack an external project at configure time. See the module
# header above for the full contract; on return,
# <PROJ>_SOURCE_DIR and <PROJ>_BINARY_DIR are set in the caller's scope.
function(download_project)
  set(options QUIET)
  set(oneValueArgs PROJ PREFIX DOWNLOAD_DIR SOURCE_DIR BINARY_DIR
      # Prevent the following from being passed through
      CONFIGURE_COMMAND BUILD_COMMAND INSTALL_COMMAND TEST_COMMAND
  )
  set(multiValueArgs "")
  cmake_parse_arguments(DL_ARGS "${options}" "${oneValueArgs}"
                        "${multiValueArgs}" ${ARGN})

  # Hide output if requested
  if (DL_ARGS_QUIET)
    set(OUTPUT_QUIET "OUTPUT_QUIET")
  else()
    unset(OUTPUT_QUIET)
    message(STATUS "Downloading/updating ${DL_ARGS_PROJ}")
  endif()

  # Set up where we will put our temporary CMakeLists.txt file and also
  # the base point below which the default source and binary dirs will be.
  # The prefix must always be an absolute path.
  if (NOT DL_ARGS_PREFIX)
    set(DL_ARGS_PREFIX "${CMAKE_BINARY_DIR}")
  else()
    get_filename_component(DL_ARGS_PREFIX "${DL_ARGS_PREFIX}" ABSOLUTE
                           BASE_DIR "${CMAKE_CURRENT_BINARY_DIR}")
  endif()

  if (NOT DL_ARGS_DOWNLOAD_DIR)
    set(DL_ARGS_DOWNLOAD_DIR "${DL_ARGS_PREFIX}/${DL_ARGS_PROJ}-download")
  endif()

  # Ensure the caller can know where to find the source and build directories
  if (NOT DL_ARGS_SOURCE_DIR)
    set(DL_ARGS_SOURCE_DIR "${DL_ARGS_PREFIX}/${DL_ARGS_PROJ}-src")
  endif()

  if (NOT DL_ARGS_BINARY_DIR)
    set(DL_ARGS_BINARY_DIR "${DL_ARGS_PREFIX}/${DL_ARGS_PROJ}-build")
  endif()

  set(${DL_ARGS_PROJ}_SOURCE_DIR "${DL_ARGS_SOURCE_DIR}" PARENT_SCOPE)
  set(${DL_ARGS_PROJ}_BINARY_DIR "${DL_ARGS_BINARY_DIR}" PARENT_SCOPE)

  # The way that CLion manages multiple configurations, it causes a copy of
  # the CMakeCache.txt to be copied across due to it not expecting there to
  # be a project within a project. This causes the hard-coded paths in the
  # cache to be copied and builds to fail. To mitigate this, we simply
  # remove the cache if it exists before we configure the new project. It
  # is safe to do so because it will be re-generated. Since this is only
  # executed at the configure step, it should not cause additional builds or
  # downloads.
  file(REMOVE "${DL_ARGS_DOWNLOAD_DIR}/CMakeCache.txt")

  # Create and build a separate CMake project to carry out the download.
  # If we've already previously done these steps, they will not cause
  # anything to be updated, so extra rebuilds of the project won't occur.
  # Make sure to pass through CMAKE_MAKE_PROGRAM in case the main project
  # has this set to something not findable on the PATH.
  configure_file("${_DownloadProjectDir}/DownloadProject.CMakeLists.cmake.in"
                 "${DL_ARGS_DOWNLOAD_DIR}/CMakeLists.txt")
  execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}"
                          -D "CMAKE_MAKE_PROGRAM:FILE=${CMAKE_MAKE_PROGRAM}"
                          .
                  RESULT_VARIABLE result
                  ${OUTPUT_QUIET}
                  WORKING_DIRECTORY "${DL_ARGS_DOWNLOAD_DIR}"
  )

  if(result)
    message(FATAL_ERROR "CMake step for ${DL_ARGS_PROJ} failed: ${result}")
  endif()

  execute_process(COMMAND ${CMAKE_COMMAND} --build .
                  RESULT_VARIABLE result
                  ${OUTPUT_QUIET}
                  WORKING_DIRECTORY "${DL_ARGS_DOWNLOAD_DIR}"
  )

  if(result)
    message(FATAL_ERROR "Build step for ${DL_ARGS_PROJ} failed: ${result}")
  endif()
endfunction()
# * If not, see <http://www.gnu.org/licenses/>.                              *
# ************************************************************************

#-------------------------------------------------------------------------------
# Require C++17
#-------------------------------------------------------------------------------
set(CMAKE_CXX_STANDARD 17 CACHE STRING "C++ Standard")
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS FALSE)

# Avoid having CMAKE treat include directories on imported libraries as systems
# includes. In newer gcc versions the systems includes are added using the
# "-isystem" flag instead of "-I". This currently breaks the build on Fedora 36
# and 37.
set(CMAKE_NO_SYSTEM_FROM_IMPORTED TRUE)

add_compile_definitions(EOSCITRINE VERSION="${VERSION}" RELEASE="${RELEASE}")

#-------------------------------------------------------------------------------
# Compile Options
#-------------------------------------------------------------------------------
add_compile_options(-Wall
  # Keeping this in for OpenSSL: the new EVP API is still slower than the old
  # one so there is no point in changing it for the moment
  # https://github.com/openssl/openssl/issues/25858
  -Wno-deprecated-declarations
  -Werror=return-type
)

#-------------------------------------------------------------------------------
# CPU architecture flags
#-------------------------------------------------------------------------------
include(CPUArchFlags)

#-------------------------------------------------------------------------------
# Client-only flags
#-------------------------------------------------------------------------------
if (CLIENT)
  add_compile_definitions(CLIENT_ONLY=1)
endif ()

#-------------------------------------------------------------------------------
# Compiler specific flags
#-------------------------------------------------------------------------------
if (NOT MacOSX)
  if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
    # Clang requires linking with libatomic
    find_package(Atomic REQUIRED)
    link_libraries(${ATOMIC_LIBRARIES})
    add_compile_options(
      -Wno-bitwise-instead-of-logical
      -Wno-constant-conversion
      -Wno-dangling-gsl
      -Wno-deprecated-copy-with-user-provided-copy
      -Wno-header-guard
      -Wno-implicit-const-int-float-conversion
      -Wno-implicit-int-float-conversion
      -Wno-mismatched-tags
      -Wno-missing-braces
      -Wno-pessimizing-move
      -Wno-uninitialized-const-reference
      -Wno-unknown-warning-option
      -Wno-unused-but-set-variable
      -Wno-unused-lambda-capture
      -Wno-unused-private-field
      # NOTE(review): the condition part of the three generator expressions
      # below appears to have been lost in extraction (the "$<$:" prefix is
      # not valid cmake) - restore the original conditions from upstream.
      $<$:-Wno-delete-non-abstract-non-virtual-dtor>
      $<$:-Wno-inconsistent-missing-override>
      $<$:-Wno-unqualified-std-cast-call>
      -Werror=return-type
      -fclang-abi-compat=17
    )
  endif()
endif()

#-------------------------------------------------------------------------------
# Sanitizer flags
#-------------------------------------------------------------------------------
include(CheckCXXCompilerFlag)

# Enable -fsanitize=<sanitizer> globally if the compiler supports it, storing
# the support check result in <var>; aborts the configure run otherwise.
function(eos_enable_sanitizer sanitizer var)
  set(FLAG -fsanitize=${sanitizer})
  list(APPEND CMAKE_REQUIRED_FLAGS "${FLAG}")
  list(APPEND CMAKE_REQUIRED_LINK_OPTIONS "${FLAG}")
  check_cxx_compiler_flag("${FLAG}" "${var}")

  if (${${var}})
    # Set required flags in parent scope, as some sanitizers can't be used together
    set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS} PARENT_SCOPE)
    set(CMAKE_REQUIRED_LINK_OPTIONS ${CMAKE_REQUIRED_LINK_OPTIONS} PARENT_SCOPE)
    add_compile_options(${FLAG} -fno-omit-frame-pointer)
    add_link_options(${FLAG})
  else()
    message(FATAL_ERROR "Could not enable flag '${FLAG}'.\n"
            "Configure with --trace-expand to debug.")
  endif()
endfunction()

if(ASAN)
  eos_enable_sanitizer(address ASAN_SUPPORTED)
endif()

if(TSAN)
  eos_enable_sanitizer(thread TSAN_SUPPORTED)
endif()
# ************************************************************************
# * EOS - the CERN Disk Storage System (GPLv3 - see repository License)  *
# ************************************************************************

#-------------------------------------------------------------------------------
# Code coverage compiler flags and definitions
#-------------------------------------------------------------------------------
include(CheckCXXCompilerFlag)

list(APPEND CMAKE_REQUIRED_FLAGS --coverage)
list(APPEND CMAKE_REQUIRED_LINK_OPTIONS --coverage)
check_cxx_compiler_flag(--coverage COVERAGE_SUPPORTED)

if (COVERAGE_SUPPORTED)
  add_compile_definitions(COVERAGE_BUILD)
  add_compile_options(--coverage)
  add_link_options(--coverage)
else()
  message(FATAL_ERROR "Could not enable coverage. "
          "A compiler with '--coverage' support is required.")
endif()

# This is needed for correct results in multithreaded applications
list(APPEND CMAKE_REQUIRED_FLAGS -fprofile-update=atomic)
check_cxx_compiler_flag(-fprofile-update=atomic COVERAGE_UPDATE_ATOMIC)

if (COVERAGE_UPDATE_ATOMIC)
  add_compile_options(-fprofile-update=atomic)
else()
  message(WARNING "Could not enable atomic coverage updates, expect unreliable results.")
endif()

#-------------------------------------------------------------------------------
# Code coverage targets
#-------------------------------------------------------------------------------
# Capture the raw lcov trace from all gcda/gcno files in the build tree
add_custom_target(raw-code-trace
  COMMAND lcov --capture --base-directory ${CMAKE_SOURCE_DIR}
          --directory ${CMAKE_BINARY_DIR} --no-external
          --config-file ${CMAKE_SOURCE_DIR}/coverage/eoslcov.rc
          --output-file ${CMAKE_BINARY_DIR}/raw-trace.info
)

# Server trace: strip bundled third-party code, tests and console code
add_custom_target(filtered-trace-server
  COMMAND lcov --remove ${CMAKE_BINARY_DIR}/raw-trace.info
          "${CMAKE_BINARY_DIR}/\\*"
          "${CMAKE_SOURCE_DIR}/common/backward-cpp/\\*"
          "${CMAKE_SOURCE_DIR}/common/crc32c/\\*"
          "${CMAKE_SOURCE_DIR}/common/eos_cta_pb/\\*"
          "${CMAKE_SOURCE_DIR}/common/fmt/\\*"
          "${CMAKE_SOURCE_DIR}/common/xrootd-ssi-protobuf-interface/\\*"
          "${CMAKE_SOURCE_DIR}/console/\\*"
          "${CMAKE_SOURCE_DIR}/fst/tests/\\*"
          "${CMAKE_SOURCE_DIR}/namespace/ns_quarkdb/\\*"
          "${CMAKE_SOURCE_DIR}/test/\\*"
          "${CMAKE_SOURCE_DIR}/unit_tests/\\*"
          --config-file ${CMAKE_SOURCE_DIR}/coverage/eoslcov.rc
          --output-file ${CMAKE_BINARY_DIR}/filtered-trace-server.info
  DEPENDS raw-code-trace
)

# Client trace: keep only the console/ sources
add_custom_target(filtered-trace-client
  COMMAND lcov --extract ${CMAKE_BINARY_DIR}/raw-trace.info
          "${CMAKE_SOURCE_DIR}/console/\\*"
          --config-file ${CMAKE_SOURCE_DIR}/coverage/eoslcov.rc
          --output-file ${CMAKE_BINARY_DIR}/filtered-trace-client.info
  DEPENDS raw-code-trace
)

add_custom_target(coverage-server
  COMMAND genhtml ${CMAKE_BINARY_DIR}/filtered-trace-server.info
          --config-file ${CMAKE_SOURCE_DIR}/coverage/eoslcov.rc
          --output-directory ${CMAKE_BINARY_DIR}/coverage-report/server
  DEPENDS filtered-trace-server
)

add_custom_target(coverage-client
  COMMAND genhtml ${CMAKE_BINARY_DIR}/filtered-trace-client.info
          --config-file ${CMAKE_SOURCE_DIR}/coverage/eoslcov.rc
          --output-directory ${CMAKE_BINARY_DIR}/coverage-report/client
  DEPENDS filtered-trace-client
)

# Aggregate target building both HTML reports
add_custom_target(coverage-report
  DEPENDS coverage-server coverage-client
)

# Optionally install the .gcno files so traces can be resolved on other hosts
if (COV_CROSS_PROFILE)
  install(DIRECTORY ${CMAKE_BINARY_DIR}
          DESTINATION "/var/eos/coverage"
          FILES_MATCHING PATTERN "*.gcno"
          PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
  )
endif()
# * If not, see <http://www.gnu.org/licenses/>.                              *
# ************************************************************************

#-------------------------------------------------------------------------------
# Search for dependencies
#-------------------------------------------------------------------------------
option(PACKAGEONLY "Build without dependencies" OFF)
option(CLIENT "Build only client packages" OFF)
option(BUILD_XRDCL_RAIN_PLUGIN "Enable XrdCl RAIN plugin" OFF)
option(BUILD_CSS_PLUGIN "Enable CSS plugin" ON)

# Find default versions of Python with higher priority
set(Python_FIND_UNVERSIONED_NAMES "FIRST" CACHE STRING "")
set(Python3_FIND_UNVERSIONED_NAMES "FIRST" CACHE STRING "")

if(NOT PACKAGEONLY)
  set(THREADS_PREFER_PTHREAD_FLAG TRUE)
  find_package(Threads REQUIRED)
  find_package(PythonSitePkg REQUIRED)
  find_package(CURL REQUIRED)
  find_package(XRootD REQUIRED)
  find_package(fuse REQUIRED)
  # NOTE(review): duplicate of the find_package(Threads) call above
  find_package(Threads REQUIRED)
  find_package(ZLIB REQUIRED)
  find_package(readline REQUIRED)
  find_package(uuid REQUIRED)
  find_package(OpenSSL REQUIRED)
  find_package(ncurses REQUIRED)
  find_package(ZMQ REQUIRED)
  find_package(krb5 REQUIRED)
  find_package(SparseHash REQUIRED)
  find_package(jsoncpp REQUIRED)
  find_package(Libevent REQUIRED)
  find_package(fmt REQUIRED)
  find_package(bz2 REQUIRED)
  find_package(absl REQUIRED)
  find_package(RocksDB REQUIRED)
  # Optional dependencies below
  find_package(jemalloc)
  find_package(EosGrpcGateway)
  find_package(Sphinx)
  find_package(fuse3)
  find_package(isal_crypto)
  find_package(isal)
  find_package(xxhash)
  find_package(libbfd)
  find_package(davix)
  find_package(nfs)
  find_package(Scitokens)
  find_package(GRPC REQUIRED)
  find_package(Protobuf3 REQUIRED)

  if (GRPC_FOUND AND XROOTD_FOUND)
    # Library paths for Protobuf, grpc and xrootd needs to be added to the
    # RPATH of the libraries and binaries built since they are not installed
    # in the usual system location.
    set(CMAKE_SKIP_RPATH FALSE)
    set(CMAKE_SKIP_BUILD_RPATH FALSE)
    # TODO: To be removed in the future when CMAKE properly handles RPATH.
    # Currently without this option the koji builds fail with error:
    # file RPATH_CHANGE could not write new RPATH
    set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
    set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
    get_filename_component(EOS_XROOTD_RPATH ${XROOTD_UTILS_LIBRARY} DIRECTORY)
    get_filename_component(EOS_GRPC_RPATH ${GRPC_GRPC++_LIBRARY} DIRECTORY)
    list(APPEND CMAKE_INSTALL_RPATH "${EOS_GRPC_RPATH};${EOS_XROOTD_RPATH}")
    message(STATUS "Info CMAKE_INSTALL_RPATH=${CMAKE_INSTALL_RPATH}")
  else()
    # NOTE(review): "dependecies" and "GPRC" are typos ("dependencies",
    # "GRPC") in the user-visible error message - fix in a code change.
    message(FATAL_ERROR "One of the mandatory dependecies: GPRC(Protobuf) or XRootD not found")
  endif()

  if (Linux)
    find_package(help2man)
    find_package(glibc REQUIRED)
    find_package(xfs REQUIRED)
    # Either procps (3.x) or libproc2 (4.x) must be present - checked below
    find_package(procps)
    find_package(libproc2)

    if(NOT (PROCPS_FOUND OR LIBPROC2_FOUND))
      message(FATAL_ERROR "Could not find either procps 3.x or libproc2 (procps 4.x). "
              "At least one of them is required.")
    endif()
  else ()
    # Add dummy targets for APPLE to simplify the cmake file using these targets
    add_library(GLIBC::DL INTERFACE IMPORTED)
    add_library(GLIBC::RT INTERFACE IMPORTED)
    add_library(GLIBC::M INTERFACE IMPORTED)
  endif()

  # The server build also requires
  if (NOT CLIENT)
    find_package(eosfolly REQUIRED)
    find_package(ldap REQUIRED)
    find_package(ActiveMQCPP REQUIRED)
  endif()
else()
  message(STATUS "Running CMake in package only mode.")

  # Fake function for building the SRPMS in build system
  function(PROTOBUF_GENERATE_CPP SRCS HDRS)
    # This is just a hack to be able to run cmake >= 3.11 with -DPACKAGEONLY
    # enabled. Otherwise the protobuf libraries built using add_library will
    # complain as they have no SOURCE files.
    set(${SRCS} "${CMAKE_SOURCE_DIR}/common/Logging.cc" PARENT_SCOPE)
    set(${HDRS} "${CMAKE_SOURCE_DIR}/common/Logging.hh" PARENT_SCOPE)
    return()
  endfunction()

  function(GRPC_GENERATE_CPP SRCS HDRS)
    # This is just a hack to be able to run cmake >= 3.11 with -DPACKAGEONLY
    # enabled. Otherwise the protobuf libraries built using add_library will
    # complain as they have no SOURCE files.
    set(${SRCS} "${CMAKE_SOURCE_DIR}/common/Logging.cc" PARENT_SCOPE)
    set(${HDRS} "${CMAKE_SOURCE_DIR}/common/Logging.hh" PARENT_SCOPE)
    return()
  endfunction()

  # Fake targets
  add_library(ZLIB::ZLIB INTERFACE IMPORTED)
  add_library(UUID::UUID INTERFACE IMPORTED)
  add_library(PROCPS::PROCPS INTERFACE IMPORTED)
  add_library(XROOTD::SERVER INTERFACE IMPORTED)
  add_library(XROOTD::CL INTERFACE IMPORTED)
  add_library(XROOTD::SSI INTERFACE IMPORTED)
  add_library(XROOTD::HTTP INTERFACE IMPORTED)
  add_library(XROOTD::UTILS INTERFACE IMPORTED)
  add_library(XROOTD::POSIX INTERFACE IMPORTED)
  add_library(XROOTD::PRIVATE INTERFACE IMPORTED)
  add_library(PROTOBUF::PROTOBUF INTERFACE IMPORTED)
  add_library(NCURSES::NCURSES INTERFACE IMPORTED)
  add_library(NCURSES::NCURSES_STATIC INTERFACE IMPORTED)
  add_library(READLINE::READLINE INTERFACE IMPORTED)
  add_library(JSONCPP::JSONCPP INTERFACE IMPORTED)
  add_library(FOLLY::FOLLY INTERFACE IMPORTED)
  add_library(ZMQ::ZMQ INTERFACE IMPORTED)
  add_library(KRB5::KRB5 INTERFACE IMPORTED)
  add_library(OpenSSL::SSL INTERFACE IMPORTED)
  add_library(OpenSSL::Crypto INTERFACE IMPORTED)
  add_library(LDAP::LDAP INTERFACE IMPORTED)
  add_library(GRPC::grpc INTERFACE IMPORTED)
  add_library(GRPC::grpc++ INTERFACE IMPORTED)
  add_library(GRPC::grpc++_reflection INTERFACE IMPORTED)
  add_library(CURL::libcurl INTERFACE IMPORTED)
  add_library(ATOMIC::ATOMIC INTERFACE IMPORTED)
  add_library(LIBEVENT::LIBEVENT INTERFACE IMPORTED)
  add_library(FUSE::FUSE INTERFACE IMPORTED)
  add_library(FUSE3::FUSE3 INTERFACE IMPORTED)
  add_library(GLIBC::DL INTERFACE IMPORTED)
  add_library(GLIBC::RT INTERFACE IMPORTED)
  add_library(GLIBC::M INTERFACE IMPORTED)
  add_library(LIBBFD::LIBBFD INTERFACE IMPORTED)
  add_library(LIBBFD::IBERTY INTERFACE IMPORTED)
  add_library(RICHACL::RICHACL INTERFACE IMPORTED)
  add_library(DAVIX::DAVIX INTERFACE IMPORTED)
  add_library(NFS::NFS INTERFACE IMPORTED)
  add_library(ROCKSDB::ROCKSDB INTERFACE IMPORTED)
  add_library(SCITOKENS::SCITOKENS INTERFACE IMPORTED)
  add_library(ABSL::ABSL INTERFACE IMPORTED)
  add_library(BZ2::BZ2 INTERFACE IMPORTED)
  add_library(ZSTD::ZSTD INTERFACE IMPORTED)
  add_library(LZ4::LZ4 INTERFACE IMPORTED)
  add_library(Snappy::snappy INTERFACE IMPORTED)
  add_library(XFS::XFS INTERFACE IMPORTED)
  add_library(GOOGLE::SPARSEHASH INTERFACE IMPORTED)
  add_library(ISAL::ISAL INTERFACE IMPORTED)
  add_library(ISAL::ISAL_CRYPTO INTERFACE IMPORTED)
  add_library(XXHASH::XXHASH INTERFACE IMPORTED)
  add_library(JEMALLOC::JEMALLOC INTERFACE IMPORTED)
  add_library(EosGrpcGateway::EosGrpcGateway INTERFACE IMPORTED)
  add_library(fmt::fmt-header-only INTERFACE IMPORTED)
  add_library(ActiveMQCPP::ActiveMQCPP INTERFACE IMPORTED)
endif()
If not, see .* # ************************************************************************ #------------------------------------------------------------------------------- # Search for dependencies #------------------------------------------------------------------------------- find_program(DOT_EXE "dot") if(DOT_EXE) message(STATUS "dot found: ${DOT_EXE}") else() message(STATUS "dot not found!") endif() set(DOT_OUTPUT_TYPE "pdf" CACHE STRING "Build a dependency graph. Options are dot output types: ps, png, pdf..." ) if(DOT_EXE) add_custom_target(dependency-graph COMMAND ${CMAKE_COMMAND} ${CMAKE_SOURCE_DIR} --graphviz=${CMAKE_BINARY_DIR}/graphviz/${PROJECT_NAME}.dot COMMAND ${DOT_EXE} -T${DOT_OUTPUT_TYPE} ${CMAKE_BINARY_DIR}/graphviz/${PROJECT_NAME}.dot -o ${CMAKE_BINARY_DIR}/${PROJECT_NAME}.${DOT_OUTPUT_TYPE} COMMENT "Dependency graph generated and located at ${CMAKE_BINARY_DIR}/${PROJECT_NAME}.${DOT_OUTPUT_TYPE}") endif() ================================================ FILE: cmake/EosOSDefaults.cmake ================================================ # ---------------------------------------------------------------------- # File: CMakeLists.txt # Author: Elvin-Alin Sindrailru CERN # ---------------------------------------------------------------------- # ************************************************************************ # * EOS - the CERN Disk Storage System * # * Copyright (C) 2016 CERN/Switzerland * # * * # * This program is free software: you can redistribute it and/or modify * # * it under the terms of the GNU General Public License as published by * # * the Free Software Foundation, either version 3 of the License, or * # * (at your option) any later version. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU General Public License for more details. 
* # * * # * You should have received a copy of the GNU General Public License * # * along with this program. If not, see .* # ************************************************************************ #------------------------------------------------------------------------------- # Detect the operating system and define variables #------------------------------------------------------------------------------- # Nothing detected yet set(Linux FALSE ) set(MacOSX FALSE ) set(Windows FALSE ) set(OSDEFINE "") # Check if we are on Linux if("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux") include(GNUInstallDirs) set(Linux TRUE ) set(OSDEFINE "-D__LINUX__=1") endif() # Check if we are on MacOSX if("${CMAKE_SYSTEM_NAME}" STREQUAL "Darwin" ) include(GNUInstallDirs) set(MacOSX TRUE ) set(CLIENT TRUE ) set(OSDEFINE "-D__APPLE__=1") # On MAC we don't link static objects at all set(FUSE_LIBRARY /usr/local/lib/libosxfuse_i64.dylib) set(CMAKE_MACOSX_RPATH TRUE) set(CMAKE_SKIP_BUILD_RPATH FALSE) set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE) list(APPEND CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) list(FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES "${CMAKE_INSTALL_PREFIX}/lib" isSystemDir) if("${isSystemDir}" STREQUAL "-1") list(APPEND CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib") endif() endif() # Check if we are on Windows if("${CMAKE_SYSTEM_NAME}" STREQUAL "Windows") set(Windows TRUE ) set(OSDEFINE "-D__WINDOWS__=1") endif() add_compile_definitions(${OSDEFINE}) ================================================ FILE: cmake/EosSummary.cmake ================================================ # ---------------------------------------------------------------------- # File: CMakeLists.txt # Author: Elvin-Alin Sindrilaru # ---------------------------------------------------------------------- # ************************************************************************ # * EOS - the CERN Disk Storage System * # * Copyright (C) 2011 
CERN/Switzerland                                                  *
# *                                                                      *
# * This program is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation, either version 3 of the License, or    *
# * (at your option) any later version.                                  *
# *                                                                      *
# * This program is distributed in the hope that it will be useful,      *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of       *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
# * GNU General Public License for more details.                         *
# *                                                                      *
# * You should have received a copy of the GNU General Public License    *
# * along with this program.  If not, see .*
# ************************************************************************

#-------------------------------------------------------------------------------
# Print Configuration
#-------------------------------------------------------------------------------
message(STATUS "_________________________________________________")
message(STATUS "Version : eos-" ${VERSION} "-" ${RELEASE})

if(CLIENT)
  message(STATUS "Modules : client")
else()
  message(STATUS "Modules : client + server")
endif()

# Default to "no flock" unless fuse3 or a fuse2 mount version 29 is available
set(FUSE_FLOCK_STATUS "FUSE_NO_FLOCK_SUPPORT")
# We pass -DFUSE_MOUNT_VERSION0 hence the pragma for 290 in llfusexx
if(FUSE3_FOUND OR ("${FUSE_MOUNT_VERSION}" STREQUAL "29"))
  set(FUSE_FLOCK_STATUS "FUSE_SUPPORTS_FLOCK")
endif()

message(STATUS ".................................................")
message(STATUS "prefix : " ${CMAKE_INSTALL_PREFIX})
message(STATUS "bin dir : " ${CMAKE_INSTALL_FULL_BINDIR})
message(STATUS "sbin dir : " ${CMAKE_INSTALL_SBINDIR})
message(STATUS "lib dir : " ${CMAKE_INSTALL_FULL_LIBDIR})
message(STATUS "sysconfig dir : " ${CMAKE_INSTALL_SYSCONFDIR})
message(STATUS ".................................................")
message(STATUS "fuse2-build : ${FUSE_FOUND}")
message(STATUS "fuse3-build : ${FUSE3_FOUND}")
message(STATUS "fuse-mount-ver: ${FUSE_MOUNT_VERSION}")
message(STATUS "fuse-flock : ${FUSE_FLOCK_STATUS}")
message(STATUS "grpc-build : ${GRPC_FOUND}")
message(STATUS "isa-l_crypto : ${ISAL_CRYPTO_FOUND}")
message(STATUS "isa-l : ${ISAL_FOUND}")
message(STATUS "xxhash : ${XXHASH_FOUND}")
message(STATUS "davix : ${DAVIX_FOUND}")
message(STATUS ".................................................")
message(STATUS "C Compiler : " ${CMAKE_C_COMPILER})
message(STATUS "C++ Compiler : " ${CMAKE_CXX_COMPILER})
message(STATUS "Protobuf : EXE " ${PROTOBUF3_PROTOC_EXECUTABLE} " INC " ${PROTOBUF3_INCLUDE_DIR} " LIB " ${PROTOBUF3_LIBRARY})
message(STATUS "Build type : " ${CMAKE_BUILD_TYPE})
message(STATUS "Code coverage : ${COVERAGE}")
message(STATUS "_________________________________________________")
unset(FUSE_FLOCK_STATUS)

================================================
FILE: cmake/EosTui.cmake
================================================
# ----------------------------------------------------------------------
# EOS TUI packaging helpers
# ----------------------------------------------------------------------
set(EOS_TUI_LICENSE_URL "https://raw.githubusercontent.com/cern-eos/eos-tui/v${EOS_TUI_VERSION}/LICENSE")
set(EOS_TUI_README_URL "https://raw.githubusercontent.com/cern-eos/eos-tui/v${EOS_TUI_VERSION}/README.md")

# Pick the release binary matching the build platform (Linux amd64/arm64 only)
if(CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|amd64)$")
  set(EOS_TUI_BINARY_NAME "eos-tui_v${EOS_TUI_VERSION}_linux_amd64")
  set(EOS_TUI_BINARY_URL "https://github.com/cern-eos/eos-tui/releases/download/v${EOS_TUI_VERSION}/${EOS_TUI_BINARY_NAME}")
elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm64)$")
  set(EOS_TUI_BINARY_NAME "eos-tui_v${EOS_TUI_VERSION}_linux_arm64")
  set(EOS_TUI_BINARY_URL
"https://github.com/cern-eos/eos-tui/releases/download/v${EOS_TUI_VERSION}/${EOS_TUI_BINARY_NAME}")
endif()

# Staging area in the build tree where the artifacts are downloaded
set(EOS_TUI_INSTALL_STAGING_DIR "${CMAKE_BINARY_DIR}/eos-tui/v${EOS_TUI_VERSION}")
set(EOS_TUI_LICENSE_STAGED "${EOS_TUI_INSTALL_STAGING_DIR}/LICENSE")
set(EOS_TUI_README_STAGED "${EOS_TUI_INSTALL_STAGING_DIR}/README.md")

if(EOS_TUI_BINARY_NAME)
  set(EOS_TUI_BINARY_STAGED "${EOS_TUI_INSTALL_STAGING_DIR}/${EOS_TUI_BINARY_NAME}")
  # The generated script performs the actual downloads at install time
  configure_file(
    "${CMAKE_CURRENT_SOURCE_DIR}/cmake/EosTuiInstall.cmake.in"
    "${CMAKE_CURRENT_BINARY_DIR}/cmake/EosTuiInstall.cmake"
    @ONLY)
  install(SCRIPT "${CMAKE_CURRENT_BINARY_DIR}/cmake/EosTuiInstall.cmake")
  install(PROGRAMS "${EOS_TUI_BINARY_STAGED}"
    DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}
    RENAME eos-tui)
  install(FILES "${EOS_TUI_LICENSE_STAGED}"
    DESTINATION "${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eos-tui")
  install(FILES "${EOS_TUI_README_STAGED}"
    DESTINATION "${CMAKE_INSTALL_FULL_DATAROOTDIR}/doc/eos-tui")
else()
  message(WARNING "EOS TUI install is only configured for Linux x86_64 and aarch64 builds.")
endif()

================================================
FILE: cmake/EosTuiInstall.cmake.in
================================================
# ----------------------------------------------------------------------
# EOS TUI install helpers
# ----------------------------------------------------------------------
# Download <url> to <destination> unless it is already staged.
# Aborts the install when the download status code is non-zero.
function(eos_tui_download url destination)
  if(EXISTS "${destination}")
    return()
  endif()
  file(DOWNLOAD "${url}" "${destination}"
    STATUS download_status
    TLS_VERIFY ON)
  list(GET download_status 0 download_code)
  list(GET download_status 1 download_message)
  if(NOT download_code EQUAL 0)
    # file(DOWNLOAD) may leave an empty/partial file behind on failure
    file(REMOVE "${destination}")
    message(FATAL_ERROR "Failed to download ${url}: ${download_message}")
  endif()
endfunction()

file(MAKE_DIRECTORY "@EOS_TUI_INSTALL_STAGING_DIR@")
eos_tui_download("@EOS_TUI_BINARY_URL@" "@EOS_TUI_BINARY_STAGED@")
eos_tui_download("@EOS_TUI_LICENSE_URL@" "@EOS_TUI_LICENSE_STAGED@")
eos_tui_download("@EOS_TUI_README_URL@" "@EOS_TUI_README_STAGED@")

================================================
FILE: cmake/EosUtils.cmake
================================================
# ----------------------------------------------------------------------
# File: CMakeLists.txt
# Author: Andreas-Joachim Peters - CERN
# ----------------------------------------------------------------------
# ************************************************************************
# * EOS - the CERN Disk Storage System                                   *
# * Copyright (C) 2011 CERN/Switzerland                                  *
# *                                                                      *
# * This program is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation, either version 3 of the License, or    *
# * (at your option) any later version.                                  *
# *                                                                      *
# * This program is distributed in the hope that it will be useful,      *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of       *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
# * GNU General Public License for more details.                         *
# *                                                                      *
# * You should have received a copy of the GNU General Public License    *
# * along with this program.
If not, see .*
# ************************************************************************

#-------------------------------------------------------------------------------
# Get UID/GID for an account
#-------------------------------------------------------------------------------
# EOS_GetUidGid(<username> <uid-out-var> <gid-out-var>)
# Resolves the numeric uid/gid of <username> via id(1) and stores them in the
# caller's scope. Aborts the configure run if either lookup fails.
function(EOS_GetUidGid USERNAME UIDVARNAME GIDVARNAME)
  execute_process(
    COMMAND sh -c "id -u ${USERNAME}"
    OUTPUT_VARIABLE UID
    OUTPUT_STRIP_TRAILING_WHITESPACE
    RESULT_VARIABLE UID_RETC)
  execute_process(
    COMMAND sh -c "id -g ${USERNAME}"
    OUTPUT_VARIABLE GID
    OUTPUT_STRIP_TRAILING_WHITESPACE
    RESULT_VARIABLE GID_RETC)
  # Fix: the original wrote both return codes into the same RETC variable, so
  # a failure of the "id -u" call was silently ignored. Check each one.
  if(NOT ("${UID_RETC}" STREQUAL "0"))
    message(FATAL_ERROR "Error calling uid, return code is ${UID_RETC}")
  endif()
  if(NOT ("${GID_RETC}" STREQUAL "0"))
    message(FATAL_ERROR "Error calling gid, return code is ${GID_RETC}")
  endif()
  set(${UIDVARNAME} ${UID} PARENT_SCOPE)
  set(${GIDVARNAME} ${GID} PARENT_SCOPE)
endfunction()

#-------------------------------------------------------------------------------
# Get version
#-------------------------------------------------------------------------------
# EOS_GetVersion(<major> <minor> <patch> <release>)
# When any of MAJOR/MINOR/PATCH is empty, derives the version by running
# genversion.sh; exports VERSION_MAJOR/MINOR/PATCH, VERSION and RELEASE into
# the parent scope. Debug builds fall back to a cached or placeholder version
# if the script fails.
function(EOS_GetVersion MAJOR MINOR PATCH RELEASE)
  if(("${MAJOR}" STREQUAL "") OR ("${MINOR}" STREQUAL "") OR ("${PATCH}" STREQUAL ""))
    message(VERBOSE "Determining EOS version with `${CMAKE_SOURCE_DIR}/genversion.sh` script")
    execute_process(
      COMMAND ${CMAKE_SOURCE_DIR}/genversion.sh ${CMAKE_SOURCE_DIR}
      OUTPUT_VARIABLE VERSION_INFO
      RESULT_VARIABLE VERSION_INFO_RESULT
      OUTPUT_STRIP_TRAILING_WHITESPACE)
    if(NOT VERSION_INFO_RESULT EQUAL 0)
      set(VERSION_ERR_MSG "Error getting EOS version info using `${CMAKE_SOURCE_DIR}/genversion.sh`")
      if(CMAKE_BUILD_TYPE STREQUAL "Debug")
        # For debug builds we take the version from cache if available,
        # otherwise use a dummy version
        message(WARNING "${VERSION_ERR_MSG}")
        if(NOT DEFINED VERSION_INFO_FROM_CACHE)
          # Use a placeholder version (might need to be updated in the future)
          set(VERSION_INFO "5.3.27-unknown")
        else()
          message(WARNING "Using cached EOS version info for debug build: ${VERSION_INFO_FROM_CACHE}")
          set(VERSION_INFO "${VERSION_INFO_FROM_CACHE}")
        endif()
        message(WARNING "Setting EOS version to ${VERSION_INFO} for debug build")
      else()
        message(FATAL_ERROR "${VERSION_ERR_MSG}")
      endif()
    else()
      message(VERBOSE "Detected EOS version: ${VERSION_INFO}")
      if(CMAKE_BUILD_TYPE STREQUAL "Debug")
        set(VERSION_INFO_FROM_CACHE "${VERSION_INFO}" CACHE INTERNAL
          "Cached EOS version info for debug builds")
      endif()
    endif()
    string(REPLACE "." ";" VERSION_LIST ${VERSION_INFO})
    list(GET VERSION_LIST 0 MAJOR)
    list(GET VERSION_LIST 1 MINOR)
    list(GET VERSION_LIST 2 PATCH)
    # The patch could also contain the RELEASE value if this is a snapshot
    string(FIND "${PATCH}" "-" POS)
    if (NOT "${POS}" EQUAL "-1")
      string(REPLACE "-" ";" PR_LIST ${PATCH})
      list(GET PR_LIST 0 PATCH)
      # Set RELEASE only if not already given by the caller
      if ("${RELEASE}" STREQUAL "")
        list(GET PR_LIST 1 RELEASE)
      endif()
    endif()
  endif()
  set(VERSION_MAJOR ${MAJOR} PARENT_SCOPE)
  set(VERSION_MINOR ${MINOR} PARENT_SCOPE)
  set(VERSION_PATCH ${PATCH} PARENT_SCOPE)
  set(VERSION "${MAJOR}.${MINOR}.${PATCH}" PARENT_SCOPE)
  if("${RELEASE}" STREQUAL "")
    set(RELEASE "1")
  endif()
  set(RELEASE ${RELEASE} PARENT_SCOPE)
endfunction()

#-------------------------------------------------------------------------------
# Detect in source builds
#-------------------------------------------------------------------------------
macro(EOS_CheckOutOfSourceBuild)
  # Check if a previous in-source build left artifacts behind
  if(EXISTS ${CMAKE_SOURCE_DIR}/CMakeCache.txt OR EXISTS ${CMAKE_SOURCE_DIR}/CMakeFiles)
    # Fix: the original issued a second message(FATAL_ERROR) after this one,
    # which was unreachable; both texts are merged into a single message.
    message(FATAL_ERROR "CMakeCache.txt or CMakeFiles exists in source directory! Please remove them before running cmake .")
  endif()
  # Get real paths of the source and binary directories
  get_filename_component(srcdir "${CMAKE_SOURCE_DIR}" REALPATH)
  get_filename_component(bindir "${CMAKE_BINARY_DIR}" REALPATH)
  # Check for in-source builds
  if(${srcdir} STREQUAL ${bindir})
    message(FATAL_ERROR "EOS cannot be built in-source! Please run cmake outside the source directory")
  endif()
endmacro()

================================================
FILE: cmake/FindActiveMQCPP.cmake
================================================
# FindActiveMQCPP.cmake
# Locate the header
find_path(ACTIVEMQCPP_INCLUDE_DIR
  NAMES cms/Connection.h
  PATH_SUFFIXES activemq-cpp activemq-cpp-3.9.5 activemq-cpp-3.9 activemq-cpp-3
  PATHS /usr/include /usr/local/include /opt/include)

# Locate the library
find_library(ACTIVEMQCPP_LIBRARY
  NAMES activemq-cpp
  PATHS /usr/lib /usr/lib64 /usr/local/lib /usr/local/lib64 /opt/lib /opt/lib64)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(ActiveMQCPP
  REQUIRED_VARS ACTIVEMQCPP_LIBRARY ACTIVEMQCPP_INCLUDE_DIR)

if (ACTIVEMQCPP_FOUND AND NOT TARGET ActiveMQCPP::ActiveMQCPP)
  add_library(ActiveMQCPP::ActiveMQCPP UNKNOWN IMPORTED)
  set_target_properties(ActiveMQCPP::ActiveMQCPP PROPERTIES
    IMPORTED_LOCATION "${ACTIVEMQCPP_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${ACTIVEMQCPP_INCLUDE_DIR}")
endif()

================================================
FILE: cmake/FindAtomic.cmake
================================================
# Try to find libatomic
# Once done, this will define
#
# ATOMIC_FOUND - system has libatomic
# ATOMIC_LIBRARIES - libraries needed to use libatomic
#
# and the following imported target
# ATOMIC::ATOMIC
find_library(ATOMIC_LIBRARY
  NAMES atomic atomic.so.1 libatomic.so.1
  HINTS ${ATOMIC_ROOT}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Atomic DEFAULT_MSG ATOMIC_LIBRARY)

if (ATOMIC_FOUND AND NOT TARGET ATOMIC::ATOMIC)
  mark_as_advanced(ATOMIC_LIBRARY)
  add_library(ATOMIC::ATOMIC UNKNOWN IMPORTED)
  set_target_properties(ATOMIC::ATOMIC PROPERTIES
    IMPORTED_LOCATION ${ATOMIC_LIBRARY})
endif()

set(ATOMIC_LIBRARIES ${ATOMIC_LIBRARY})
unset(ATOMIC_LIBRARY)

================================================
FILE:
cmake/FindEosGrpcGateway.cmake
================================================
# Try to find eos-grpc-gateway library and header files
# Once done, this will define
#
# EosGrpcGateway_FOUND - system has grpc gateway library
# EosGrpcGateway_INCLUDE_DIRS - gateway include directories
# EosGrpcGateway_LIBRARY - gateway library
#
find_path(EosGrpcGateway_INCLUDE_DIR
  NAMES EosGrpcGateway.h
  HINTS /usr ${EosGrpcGateway_ROOT}
  PATH_SUFFIXES include)

find_library(EosGrpcGateway_LIBRARY
  NAMES libEosGrpcGateway.so
  HINTS /usr/lib64 ${EosGrpcGateway_ROOT}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

if(EosGrpcGateway_LIBRARY)
  set(EosGrpcGateway_FOUND 1)
  message (STATUS "EosGrpcGateway_LIBRARY=${EosGrpcGateway_LIBRARY}")
endif()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(EosGrpcGateway
  REQUIRED_VARS EosGrpcGateway_LIBRARY EosGrpcGateway_INCLUDE_DIR)
mark_as_advanced(EosGrpcGateway_INCLUDE_DIR EosGrpcGateway_LIBRARY)
message(STATUS "EosGrpcGateway_INCLUDE_DIR=${EosGrpcGateway_INCLUDE_DIR}")

if (EosGrpcGateway_FOUND AND NOT TARGET EosGrpcGateway::EosGrpcGateway)
  add_library(EosGrpcGateway::EosGrpcGateway UNKNOWN IMPORTED)
  set_target_properties(EosGrpcGateway::EosGrpcGateway PROPERTIES
    IMPORTED_LOCATION "${EosGrpcGateway_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${EosGrpcGateway_INCLUDE_DIR}"
    INTERFACE_COMPILE_DEFINITIONS EOS_GRPC_GATEWAY=1)
else()
  # Fallback: provide an empty interface target so consumers can always link it
  add_library(EosGrpcGateway::EosGrpcGateway INTERFACE IMPORTED)
endif ()

unset(EosGrpcGateway_INCLUDE_DIR)
unset(EosGrpcGateway_LIBRARY)

================================================
FILE: cmake/FindGRPC.cmake
================================================
#
# Locate and configure the GRPC library
#
# Adds the following targets:
#
# GRPC::grpc - GRPC library
# GRPC::grpc++ - GRPC C++ library
# GRPC::grpc++_reflection - GRPC C++ reflection library
# GRPC::grpc_cpp_plugin - C++ generator plugin for Protocol Buffers
#
#
# Generates C++ sources from the .proto files
#
# grpc_generate_cpp ( [...])
#
# SRCS - variable to define with autogenerated source files
# HDRS - variable to define with autogenerated header files
# DEST - directory where the source files will be created
# ARGN - .proto files
#
function(GRPC_GENERATE_CPP SRCS HDRS DEST)
  if(NOT ARGN)
    message(SEND_ERROR "Error: GRPC_GENERATE_CPP() called without any proto files")
    return()
  endif()

  # Build the -I include path list handed to protoc
  if(GRPC_GENERATE_CPP_APPEND_PATH)
    # Create an include path for each file specified
    foreach(FIL ${ARGN})
      get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
      get_filename_component(ABS_PATH ${ABS_FIL} PATH)
      list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
      if(${_contains_already} EQUAL -1)
        list(APPEND _protobuf_include_path -I ${ABS_PATH})
      endif()
    endforeach()
  else()
    set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR})
  endif()

  if(DEFINED PROTOBUF3_IMPORT_DIRS)
    foreach(DIR ${PROTOBUF3_IMPORT_DIRS})
      get_filename_component(ABS_PATH ${DIR} ABSOLUTE)
      list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
      if(${_contains_already} EQUAL -1)
        list(APPEND _protobuf_include_path -I ${ABS_PATH})
      endif()
    endforeach()
  endif()

  set(${SRCS})
  set(${HDRS})

  # One custom command per proto file producing the .grpc.pb.{cc,h} pair
  foreach(FIL ${ARGN})
    get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
    get_filename_component(FIL_WE ${FIL} NAME_WE)
    list(APPEND ${SRCS} "${DEST}/${FIL_WE}.grpc.pb.cc")
    list(APPEND ${HDRS} "${DEST}/${FIL_WE}.grpc.pb.h")
    # NOTE(review): with VERBATIM the trailing ":$LD_LIBRARY_PATH" is passed
    # literally (no shell expansion) — presumably harmless, but confirm the
    # intent was not to append the caller's environment value.
    add_custom_command(
      OUTPUT "${DEST}/${FIL_WE}.grpc.pb.cc" "${DEST}/${FIL_WE}.grpc.pb.h"
      COMMAND ${CMAKE_COMMAND} -E env "LD_LIBRARY_PATH=${GRPC_LD_LIBRARY_PATH}:$LD_LIBRARY_PATH" ${PROTOBUF3_PROTOC_EXECUTABLE}
      ARGS --grpc_out ${DEST} ${_protobuf_include_path} --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN} ${ABS_FIL}
      DEPENDS ${ABS_FIL} ${PROTOBUF3_PROTOC_EXECUTABLE} GRPC::grpc_cpp_plugin
      COMMENT "Running C++ GRPC compiler on ${FIL}"
      VERBATIM)
  endforeach()

  set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
  set(${SRCS} ${${SRCS}} PARENT_SCOPE)
  set(${HDRS} ${${HDRS}} PARENT_SCOPE)
endfunction()

# By default have GRPC_GENERATE_CPP macro
pass -I to protoc
# for each directory where a proto file is referenced.
if(NOT DEFINED GRPC_GENERATE_CPP_APPEND_PATH)
  set(GRPC_GENERATE_CPP_APPEND_PATH TRUE)
endif()

# Find GRPC include directory
find_path(GRPC_INCLUDE_DIR
  NAMES grpc/grpc.h
  HINTS ${GRPC_ROOT}
  PATHS /opt/eos/grpc /usr/local /usr
  PATH_SUFFIXES include
  NO_DEFAULT_PATH)
mark_as_advanced(GRPC_INCLUDE_DIR)

# Find GRPC library
find_library(GRPC_LIBRARY
  NAMES grpc
  HINTS ${GRPC_ROOT}
  PATHS /opt/eos/grpc /usr/local /usr
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}
  NO_DEFAULT_PATH)

# Find GRPC C++ library
find_library(GRPC_GRPC++_LIBRARY
  NAMES grpc++
  HINTS ${GRPC_ROOT}
  PATHS /opt/eos/grpc /usr/local /usr
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}
  NO_DEFAULT_PATH)

# Find GRPC libgpr
find_library(GRPC_LIBGPR_LIBRARY
  NAMES gpr
  HINTS ${GRPC_ROOT}
  PATHS /opt/eos/grpc /usr/local /usr
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}
  NO_DEFAULT_PATH)

# Find GRPC C++ reflection library
find_library(GRPC_GRPC++_REFLECTION_LIBRARY
  NAMES grpc++_reflection
  HINTS ${GRPC_ROOT}
  PATHS /opt/eos/grpc /usr/local /usr
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}
  NO_DEFAULT_PATH)

# Find GRPC CPP generator
find_program(GRPC_CPP_PLUGIN
  NAMES grpc_cpp_plugin
  HINTS ${GRPC_ROOT}
  PATHS /opt/eos/grpc /usr/local /usr
  PATH_SUFFIXES bin
  NO_DEFAULT_PATH)

# Find libabsl_synchronization
find_library(ABSL_SYNCHRONIZATION_LIBRARY
  NAMES absl_synchronization
  HINTS ${GRPC_ROOT} ${ABSL_ROOT}
  PATHS /opt/eos/grpc /usr/local /usr
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}
  NO_DEFAULT_PATH)

# Find libabsl_base
find_library(ABSL_BASE_LIBRARY
  NAMES absl_base
  HINTS ${GRPC_ROOT} ${ABSL_ROOT}
  PATHS /opt/eos/grpc /usr/local /usr
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}
  NO_DEFAULT_PATH)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GRPC
  REQUIRED_VARS GRPC_LIBRARY GRPC_INCLUDE_DIR ABSL_BASE_LIBRARY
                ABSL_SYNCHRONIZATION_LIBRARY GRPC_LIBGPR_LIBRARY
                GRPC_GRPC++_REFLECTION_LIBRARY GRPC_CPP_PLUGIN)
mark_as_advanced(GRPC_LIBRARY GRPC_GRPC++_LIBRARY
  GRPC_GRPC++_REFLECTION_LIBRARY GRPC_CPP_PLUGIN)

if (GRPC_FOUND AND NOT TARGET GRPC::grpc AND NOT TARGET GRPC::grpc++)
  get_filename_component(GRPC_LD_LIBRARY_PATH ${GRPC_GRPC++_LIBRARY} DIRECTORY)
  message(STATUS "GRPC library path: ${GRPC_LD_LIBRARY_PATH}")

  add_library(GRPC::grpc UNKNOWN IMPORTED)
  set_target_properties(GRPC::grpc PROPERTIES
    INTERFACE_INCLUDE_DIRECTORIES ${GRPC_INCLUDE_DIR}
    INTERFACE_LINK_LIBRARIES "-lpthread;-ldl"
    IMPORTED_LOCATION ${GRPC_LIBRARY}
    INTERFACE_COMPILE_DEFINITIONS EOS_GRPC=1)

  add_library(GRPC::GPR UNKNOWN IMPORTED)
  set_target_properties(GRPC::GPR PROPERTIES
    INTERFACE_INCLUDE_DIRECTORIES ${GRPC_INCLUDE_DIR}
    IMPORTED_LOCATION ${GRPC_LIBGPR_LIBRARY})

  add_library(GRPC::grpc++ UNKNOWN IMPORTED)
  set_target_properties(GRPC::grpc++ PROPERTIES
    INTERFACE_INCLUDE_DIRECTORIES ${GRPC_INCLUDE_DIR}
    INTERFACE_LINK_LIBRARIES "${GRPC_LIBGPR_LIBRARY};${ABSL_SYNCHRONIZATION_LIBRARY};${ABSL_BASE_LIBRARY}"
    IMPORTED_LOCATION ${GRPC_GRPC++_LIBRARY}
    INTERFACE_COMPILE_DEFINITIONS EOS_GRPC=1)

  add_library(GRPC::grpc++_reflection UNKNOWN IMPORTED)
  set_target_properties(GRPC::grpc++_reflection PROPERTIES
    INTERFACE_INCLUDE_DIRECTORIES ${GRPC_INCLUDE_DIR}
    INTERFACE_LINK_LIBRARIES GRPC::grpc++
    IMPORTED_LOCATION ${GRPC_GRPC++_REFLECTION_LIBRARY})

  add_executable(GRPC::grpc_cpp_plugin IMPORTED)
  set_target_properties(GRPC::grpc_cpp_plugin PROPERTIES
    IMPORTED_LOCATION ${GRPC_CPP_PLUGIN})
endif()

unset(GRPC_INCLUDE_DIR)
unset(GRPC_LIBRARY)

================================================
FILE: cmake/FindGlobus.cmake
================================================
# - Locate Globus libraries
# Defines:
#
# GLOBUS_FOUND
# GLOBUS_INCLUDE_DIR
# GLOBUS_INCLUDE_DIRS (not cached)
# GLOBUS_LIBRARIES (not cached)
# GLOBUS_xxx_LIBRARY
set( headers globus_config globus_gsi_credential )

foreach( head ${headers} )
  find_path(GLOBUS_INCLUDE_DIR_${head}
    NAMES ${head}.h
    HINTS ${GLOBUS_DIR}/include $ENV{GLOBUS_LOCATION}/include
          /usr/include/x86_64-linux-gnu /opt/globus/include /usr/include
PATH_SUFFIXES gcc32 gcc32dbg gcc32pthr gcc32dbgpthr gcc64 gcc64dbg gcc64pthr gcc64dbgpthr globus)
  if(GLOBUS_INCLUDE_DIR_${head})
    list(APPEND GLOBUS_INCLUDE_DIRS ${GLOBUS_INCLUDE_DIR_${head}})
  endif()
endforeach()
#message("found GLOBUS include dirs" ${GLOBUS_INCLUDE_DIRS})

set(libraries gssapi_gsi gss_assist gsi_credential common gsi_callback proxy_ssl
    gsi_sysconfig openssl_error oldgaa gsi_cert_utils openssl gsi_proxy_core callout)

foreach( lib ${libraries})
  find_library(GLOBUS_${lib}_LIBRARY
    NAMES globus_${lib}
    HINTS ${GLOBUS_DIR}/lib $ENV{GLOBUS_LOCATION}/lib /usr/lib/x86_64-linux-gnu )
  if(GLOBUS_${lib}_LIBRARY)
    set(GLOBUS_${lib}_FOUND 1)
    list(APPEND GLOBUS_LIBRARIES ${GLOBUS_${lib}_LIBRARY})
    mark_as_advanced(GLOBUS_${lib}_LIBRARY)
  endif()
endforeach()

# handle the QUIETLY and REQUIRED arguments and set GLOBUS_FOUND to TRUE if
# all listed variables are TRUE
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GLOBUS DEFAULT_MSG
  GLOBUS_INCLUDE_DIR GLOBUS_common_LIBRARY)
mark_as_advanced(GLOBUS_FOUND GLOBUS_INCLUDE_DIR)

================================================
FILE: cmake/FindLibevent.cmake
================================================
# Try to find libevent
# Once done, this will define
#
# LIBEVENT_FOUND - system has libevent
# LIBEVENT_INCLUDE_DIRS - libevent include directories
# LIBEVENT_LIBRARIES - libraries needed to use libevent
#
# and the following imported targets
#
# LIBEVENT::LIBEVENT
find_package(PkgConfig)
pkg_check_modules(PC_libevent QUIET libevent)
set(LIBEVENT_VERSION ${PC_libevent_VERSION})

find_path(LIBEVENT_INCLUDE_DIR
  NAMES event.h
  HINTS ${LIBEVENT_ROOT} ${PC_libevent_INCLUDEDIR} ${PC_libevent_INCLUDE_DIRS}
        /usr/include/event2)

find_library( LIBEVENT_LIBRARY
  NAMES event2 event
  HINTS ${LIBEVENT_ROOT} ${PC_libevent_LIBDIR} ${PC_libevent_LIBRARY_DIRS}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Libevent REQUIRED_VARS LIBEVENT_LIBRARY
LIBEVENT_INCLUDE_DIR)

if (LIBEVENT_FOUND AND NOT TARGET LIBEVENT::LIBEVENT)
  mark_as_advanced(LIBEVENT_FOUND LIBEVENT_INCLUDE_DIR LIBEVENT_LIBRARY)
  add_library(LIBEVENT::LIBEVENT UNKNOWN IMPORTED)
  set_target_properties(LIBEVENT::LIBEVENT PROPERTIES
    IMPORTED_LOCATION "${LIBEVENT_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${LIBEVENT_INCLUDE_DIR}")
endif()

set(LIBEVENT_INCLUDE_DIRS ${LIBEVENT_INCLUDE_DIR})
set(LIBEVENT_LIBRARIES ${LIBEVENT_LIBRARY})
unset(LIBEVENT_INCLUDE_DIR)
unset(LIBEVENT_LIBRARY)

================================================
FILE: cmake/FindProtobuf3.cmake
================================================
# Try to find PROTOBUF3
# Once done, this will define
#
# PROTOBUF3_FOUND - system has Protobuf3
# PROTOBUF3_INCLUDE_DIRS - Protobuf3 include directories
# PROTOBUF3_LIBRARIES - libraries needed to use Protobuf3
#
# and the following imported targets
#
# PROTOBUF::PROTOBUF
#
# PROTOBUF_ROOT may be defined as a hint for where to look
find_program(PROTOBUF3_PROTOC_EXECUTABLE
  NAMES protoc
  HINTS ${PROTOBUF_ROOT}
  PATHS /opt/eos/grpc/ /opt/eos/ /usr/local /usr /
  PATH_SUFFIXES bin
  DOC "Version 3 of The Google Protocol Buffers Compiler (protoc)"
  NO_DEFAULT_PATH)
message(STATUS "Found protoc: ${PROTOBUF3_PROTOC_EXECUTABLE}")

find_path(PROTOBUF3_INCLUDE_DIR
  NAMES google/protobuf/message.h
  HINTS ${PROTOBUF_ROOT}
  PATHS /opt/eos/grpc/ /opt/eos/include/protobuf3 /usr/include/protobuf3
        /usr/local /usr
  PATH_SUFFIXES include
  NO_DEFAULT_PATH)

# Fix: the original used the singular keyword "NAME", which find_library does
# not know — it was treated as an additional (bogus) candidate name.
find_library(PROTOBUF3_LIBRARY
  NAMES protobuf
  HINTS ${PROTOBUF_ROOT}
  PATHS /opt/eos/grpc/ /usr/lib64/protobuf3 /usr/lib/protobuf3 /usr/local /usr
        /usr/lib/x86_64-linux-gnu
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR} lib64 lib
  NO_DEFAULT_PATH)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Protobuf3
  REQUIRED_VARS PROTOBUF3_LIBRARY PROTOBUF3_INCLUDE_DIR
                PROTOBUF3_PROTOC_EXECUTABLE)
# Fix: "PROOBUF3_FOUND" typo in the original mark_as_advanced call
mark_as_advanced(PROTOBUF3_FOUND PROTOBUF3_INCLUDE_DIR PROTOBUF3_LIBRARY
  PROTOBUF3_PROTOC_EXECUTABLE)

if (PROTOBUF3_FOUND AND NOT
TARGET PROTOBUF::PROTOBUF)
  # These are set to make the find_package(Protobuf) call at the end happy and
  # at the same time include the PROTOBUF_GENERATE_CPP function
  set(Protobuf_FOUND ${PROTOBUF3_FOUND})
  set(Protobuf_INCLUDE_DIR ${PROTOBUF3_INCLUDE_DIR})
  set(Protobuf_LIBRARY ${PROTOBUF3_LIBRARY})
  set(Protobuf_PROTOC_EXECUTABLE ${PROTOBUF3_PROTOC_EXECUTABLE})
  add_library(PROTOBUF::PROTOBUF UNKNOWN IMPORTED)
  set_target_properties(PROTOBUF::PROTOBUF PROPERTIES
    IMPORTED_LOCATION "${PROTOBUF3_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${PROTOBUF3_INCLUDE_DIR}")
  target_compile_definitions(PROTOBUF::PROTOBUF INTERFACE PROTOBUF_USE_DLLS=1)
  # Overwrite these since they are used in generating the Protobuf files
  if (NOT TARGET protobuf::protoc)
    add_executable(protobuf::protoc IMPORTED)
  endif()
  set_target_properties(protobuf::protoc PROPERTIES
    IMPORTED_LOCATION ${PROTOBUF3_PROTOC_EXECUTABLE})
  if (NOT TARGET protobuf::libprotobuf)
    add_library(protobuf::libprotobuf UNKNOWN IMPORTED)
  endif()
  set_target_properties(protobuf::libprotobuf PROPERTIES
    INTERFACE_INCLUDE_DIRECTORIES ${PROTOBUF3_INCLUDE_DIR}
    IMPORTED_LOCATION ${PROTOBUF3_LIBRARY})
endif ()

# Include Protobuf package for the generation commands like PROTOBUF_GENERATE_CPP
find_package(Protobuf)

================================================
FILE: cmake/FindPythonSitePkg.cmake
================================================
# Try to find python
# Once done, this will define
#
# PYTHONSITEPKG_FOUND - found python site packages directory
# PYTHONSITEPKG_PATH - location where python modules are installed
find_package (Python3 COMPONENTS Interpreter Development)

if(NOT Python3_Interpreter_FOUND)
  set(PYTHONSITEPKG_FOUND FALSE)
  return()
else()
  set(PYTHONSITEPKG_FOUND TRUE)
endif()

if(Python3_SITELIB)
  set(PYTHONSITEPKG_FIND_QUIETLY TRUE)
  set(PYTHONSITEPKG_PATH "${Python3_SITELIB}")
  message(STATUS "Python Site Path: ${PYTHONSITEPKG_PATH} (site lib found)")
else()
  # Fix: this fallback used the legacy PYTHON_VERSION_MAJOR/PYTHON_EXECUTABLE
  # variables, which find_package(Python3 ...) never sets — the version check
  # and the interpreter invocation below were therefore broken. Use the
  # Python3_* variables provided by the FindPython3 module instead.
  if((Python3_VERSION_MAJOR VERSION_EQUAL "3") OR (Python3_VERSION_MAJOR VERSION_GREATER "3"))
    set(PY_CMD "from distutils import sysconfig; print(sysconfig.get_python_lib());")
  else()
    set(PY_CMD "from distutils import sysconfig; print sysconfig.get_python_lib();")
  endif()
  execute_process(
    COMMAND "${Python3_EXECUTABLE}" "-c" "${PY_CMD}"
    RESULT_VARIABLE _PYTHON_SUCCESS
    ERROR_VARIABLE _PYTHON_ERROR_VALUE
    OUTPUT_VARIABLE PYTHONSITEPKG_PATH
    OUTPUT_STRIP_TRAILING_WHITESPACE)
  if(NOT _PYTHON_SUCCESS MATCHES 0)
    if(PYTHONSITEPKG_FIND_REQUIRED)
      message(FATAL_ERROR "Python config failure:\n${_PYTHON_ERROR_VALUE}")
    else()
      message(STATUS "PythonSitePkg was not required")
    endif()
    set(PYTHONLIBS_FOUND FALSE)
    return()
  endif()
  string(REGEX REPLACE "\n" "" PYTHONSITEPKG_PATH ${PYTHONSITEPKG_PATH})
  message(STATUS "Python Site Path: ${PYTHONSITEPKG_PATH}")
  include (FindPackageHandleStandardArgs)
  find_package_handle_standard_args( Python3SitePkg DEFAULT_MSG PYTHONSITEPKG_PATH)
  mark_as_advanced(PythonSitePkg PYTHONSITEPKG_PATH)
endif()

================================================
FILE: cmake/FindRocksDB.cmake
================================================
# Try to find rocksdb
# Once done, this will define
#
# ROCKSDB_FOUND - system has rocksdb
#
# and the following imported targets
#
# ROCKSDB::ROCKSDB
find_path(ROCKSDB_INCLUDE_DIR
  NAMES rocksdb/version.h
  HINTS ${ROCKSDB_ROOT}
  PATHS /opt/eos/rocksdb/ /usr/local /usr
  PATH_SUFFIXES include
  NO_DEFAULT_PATH)

find_library(ROCKSDB_LIBRARY
  NAMES rocksdb
  HINTS ${ROCKSDB_ROOT}
  PATHS /opt/eos/rocksdb/ /usr/local /usr
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR} lib
  NO_DEFAULT_PATH)

find_library(ROCKSDB_TOOLS_LIBRARY
  NAMES rocksdb_tools
  HINTS ${ROCKSDB_ROOT}
  PATHS /opt/eos/rocksdb /usr/local /usr
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR} lib
  NO_DEFAULT_PATH)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(RocksDB
  REQUIRED_VARS ROCKSDB_LIBRARY ROCKSDB_INCLUDE_DIR)
mark_as_advanced(ROCKSDB_FOUND ROCKSDB_LIBRARY ROCKSDB_INCLUDE_DIR)

if (ROCKSDB_FOUND AND NOT TARGET ROCKSDB::ROCKSDB)
  find_package(zstd
REQUIRED)
  find_package(lz4 REQUIRED)
  find_package(BZip2 REQUIRED)
  find_package(Snappy REQUIRED)
  set(ROCKSDB_LIBRARIES "ZSTD::ZSTD;LZ4::LZ4;BZip2::BZip2;Snappy::snappy")
  #@note: The ROCKSDB_LIBRARY must be specified again after
  # the ROCKSDB_TOOLS_LIBRARY since the latter has a symbol
  # that only the former provides and since these are both
  # static libraries the linker searches from left to right
  # and notes unresolved symbols as it goes!!!
  if (ROCKSDB_TOOLS_LIBRARY)
    set(ROCKSDB_LIBRARIES "${ROCKSDB_LIBRARIES};${ROCKSDB_TOOLS_LIBRARY};${ROCKSDB_LIBRARY}")
  endif()
  add_library(ROCKSDB::ROCKSDB UNKNOWN IMPORTED)
  set_target_properties(ROCKSDB::ROCKSDB PROPERTIES
    IMPORTED_LOCATION "${ROCKSDB_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${ROCKSDB_INCLUDE_DIR}"
    INTERFACE_LINK_LIBRARIES "${ROCKSDB_LIBRARIES}"
    INTERFACE_COMPILE_DEFINITIONS "HAVE_ROCKSDB=1")
else ()
  message(WARNING "Notice: rocksdb not found, no rocksdb support")
endif()

unset(ROCKSDB_INCLUDE_DIR)
unset(ROCKSDB_LIBRARY)

================================================
FILE: cmake/FindScitokens.cmake
================================================
# Try to find scitokens
# Once done, this will define
#
# SCITOKENS_FOUND - system has scitokens
# SCITOKENS_INCLUDE_DIRS - scitokens include directories
#
# and the following imported targets
#
# SCITOKENS::SCITOKENS
find_path(SCITOKENS_INCLUDE_DIR
  NAME scitokens/scitokens.h
  HINTS ${SCITOKENS_ROOT})

find_library(SCITOKENS_LIBRARY
  NAME SciTokens
  HINTS ${SCITOKENS_ROOT}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Scitokens
  REQUIRED_VARS SCITOKENS_LIBRARY SCITOKENS_INCLUDE_DIR)
mark_as_advanced(SCITOKENS_FOUND SCITOKENS_LIBRARY SCITOKENS_INCLUDE_DIR)

if (SCITOKENS_FOUND AND NOT TARGET SCITOKENS::SCITOKENS)
  add_library(SCITOKENS::SCITOKENS UNKNOWN IMPORTED)
  set_target_properties(SCITOKENS::SCITOKENS PROPERTIES
    IMPORTED_LOCATION "${SCITOKENS_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${SCITOKENS_INCLUDE_DIR}"
    INTERFACE_COMPILE_DEFINITIONS HAVE_SCITOKENS)
else()
  message(WARNING "Notice: scitokens not found, no scitokens support")
  # Empty interface target so consumers can link it unconditionally
  add_library(SCITOKENS::SCITOKENS INTERFACE IMPORTED)
endif()

unset(SCITOKENS_LIBRARY)
unset(SCITOKENS_INCLUDE_DIR)

================================================
FILE: cmake/FindSnappy.cmake
================================================
# - Find Snappy
# Find the snappy compression library and includes
#
# Snappy_INCLUDE_DIRS - where to find snappy.h, etc.
# Snappy_LIBRARIES - List of libraries when using snappy.
# Snappy_FOUND - True if snappy found.
find_path(Snappy_INCLUDE_DIRS
  NAMES snappy.h
  HINTS ${snappy_ROOT_DIR}/include)

find_library(Snappy_LIBRARIES
  NAMES snappy
  HINTS ${snappy_ROOT_DIR}/lib)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Snappy DEFAULT_MSG
  Snappy_LIBRARIES Snappy_INCLUDE_DIRS)
mark_as_advanced( Snappy_LIBRARIES Snappy_INCLUDE_DIRS)

if(Snappy_FOUND AND NOT (TARGET Snappy::snappy))
  add_library (Snappy::snappy UNKNOWN IMPORTED)
  set_target_properties(Snappy::snappy PROPERTIES
    IMPORTED_LOCATION ${Snappy_LIBRARIES}
    INTERFACE_INCLUDE_DIRECTORIES ${Snappy_INCLUDE_DIRS})
endif()

================================================
FILE: cmake/FindSparseHash.cmake
================================================
# Try to find SparseHash
# Once done, this will define
#
# SPARSEHASH_FOUND - system has SparseHash
# SPARSEHASH_INCLUDE_DIRS - SparseHash include directories
#
# and the following imported tags
#
# GOOGLE::SPARSEHASH
find_path(SPARSEHASH_INCLUDE_DIR
  NAMES google/sparsehash/sparsehashtable.h
  HINTS ${SPARSEHASH_ROOT})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(SparseHash
  REQUIRED_VARS SPARSEHASH_INCLUDE_DIR)
mark_as_advanced(SPARSEHASH_FOUND SPARSEHASH_INCLUDE_DIR)

if (SPARSEHASH_FOUND AND NOT TARGET GOOGLE::SPARSEHASH)
  add_library(GOOGLE::SPARSEHASH INTERFACE IMPORTED)
  set_target_properties(GOOGLE::SPARSEHASH PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "${SPARSEHASH_INCLUDE_DIR}") endif() set(SPARSEHASH_INCLUDE_DIRS ${SPARSEHASH_INCLUDE_DIR}) unset(SPARSEHASH_INCLUDE_DIR) ================================================ FILE: cmake/FindSphinx.cmake ================================================ # Try to find then sphinx executable # Once done, this will define # # SPHINX_FOUND - system has Sphinx # SPHINX_EXECUTABLE - Sphinx executable find_program(SPHINX_EXECUTABLE NAMES sphinx-build HINTS $ENV{SPHINX_DIR} PATH_SUFFIXES bin DOC "Sphinx documentation generator") include(FindPackageHandleStandardArgs) find_package_handle_standard_args(Sphinx DEFAULT_MSG SPHINX_EXECUTABLE) mark_as_advanced(SPHINX_EXECUTABLE) ================================================ FILE: cmake/FindXRootD.cmake ================================================ # Try to find XROOTD # Once done, this will define # # XROOTD_FOUND - system has XRootD # XROOTD_INCLUDE_DIRS - XRootD include directories # XROOTD_LIBRARIES - libraries needed to use XRootD # XROOTD_PRIVATE_INCLUDE_DIR - XRootD private include directory # # XROOTD_ROOT may be defined as a hint for where to look find_path(XROOTD_INCLUDE_DIR NAMES XrdVersion.hh HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES include/xrootd) find_path(XROOTD_PRIVATE_INCLUDE_DIR NAMES XrdOss/XrdOssApi.hh HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES include/xrootd/private) find_library(XROOTD_UTILS_LIBRARY NAMES XrdUtils HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) find_library(XROOTD_SERVER_LIBRARY NAMES XrdServer HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) find_library(XROOTD_CL_LIBRARY NAMES XrdCl HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) find_library(XROOTD_POSIX_LIBRARY NAMES XrdPosix HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES 
${CMAKE_INSTALL_LIBDIR}) find_library(XROOTD_SSI_LIBRARY NAMES XrdSsiLib HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) find_library( XROOTD_HTTP_UTILS_LIBRARY NAMES XrdHttpUtils HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) set(XROOTD_INCLUDE_DIRS ${XROOTD_INCLUDE_DIR} ${XROOTD_PRIVATE_INCLUDE_DIR}) set(XROOTD_LIBRARIES ${XROOTD_SERVER_LIBRARY} ${XROOTD_CL_LIBRARY} ${XROOTD_UTILS_LIBRARY} ${XROOTD_POSIX_LIBRARY} ${XROOTD_SSI_LIBRARY} ${XROOTD_HHTP_UTILS_LIBRARY}) ############################################################################### # Figure out what is the plugin version ############################################################################### execute_process( COMMAND grep "#define XRDPLUGIN_SOVERSION" ${XROOTD_INCLUDE_DIR}/XrdVersion.hh OUTPUT_VARIABLE XRDPLUGIN_SOVERSION ) string( REGEX MATCH "[0123456789]+" XRDPLUGIN_SOVERSION ${XRDPLUGIN_SOVERSION} ) # Find XRootD dynamically loaded libraries find_library(XROOTD_DL_SECKRB5_LIBRARY NAMES XrdSeckrb5-${XRDPLUGIN_SOVERSION} HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) find_library(XROOTD_DL_SECGSI_LIBRARY NAMES XrdSecgsi-${XRDPLUGIN_SOVERSION} HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) find_library(XROOTD_DL_SECGSIAUTHZVO_LIBRARY NAMES XrdSecgsiAUTHZVO-${XRDPLUGIN_SOVERSION} HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) find_library(XROOTD_DL_SECGSIGMAPDN_LIBRARY NAMES XrdSecgsiGMAPDN-${XRDPLUGIN_SOVERSION} HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) find_library(XROOTD_DL_SECPWD_LIBRARY NAMES XrdSecpwd-${XRDPLUGIN_SOVERSION} HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) find_library(XROOTD_DL_SECSSS_LIBRARY NAMES XrdSecsss-${XRDPLUGIN_SOVERSION} HINTS 
${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) find_library(XROOTD_DL_SECUNIX_LIBRARY NAMES XrdSecunix-${XRDPLUGIN_SOVERSION} HINTS ${XROOTD_ROOT} $ENV{XROOTD_ROOT} /opt/eos/xrootd/ PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) set(XROOTD_DL_LIBRARIES ${XROOTD_DL_SECUNIX_LIBRARY} ${XROOTD_DL_SECSSS_LIBRARY} ${XROOTD_DL_SECPWD_LIBRARY} ${XROOTD_DL_SECGSIGMAPDN_LIBRARY} ${XROOTD_DL_SECGSIAUTHZVO_LIBRARY} ${XROOTD_DL_SECGSI_LIBRARY} ${XROOTD_DL_SECKRB5_LIBRARY}) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(XRootD DEFAULT_MSG XROOTD_SERVER_LIBRARY XROOTD_UTILS_LIBRARY XROOTD_CL_LIBRARY XROOTD_POSIX_LIBRARY XROOTD_HTTP_UTILS_LIBRARY XROOTD_DL_SECUNIX_LIBRARY XROOTD_DL_SECSSS_LIBRARY XROOTD_DL_SECPWD_LIBRARY XROOTD_DL_SECGSIGMAPDN_LIBRARY XROOTD_DL_SECGSIAUTHZVO_LIBRARY XROOTD_DL_SECGSI_LIBRARY XROOTD_DL_SECKRB5_LIBRARY XROOTD_INCLUDE_DIR XROOTD_PRIVATE_INCLUDE_DIR) mark_as_advanced( XROOTD_SERVER_LIBRARY XROOTD_UTILS_LIBRARY XROOTD_HTTP_UTILS_LIBRARY XROOTD_CL_LIBRARY XROOTD_INCLUDE_DIR XROOTD_PRIVATE_INCLUDE_DIR XRDPLUGIN_SOVERSION) if (XROOTD_FOUND AND NOT TARGET XROOTD::SERVER) add_library(XROOTD::PRIVATE INTERFACE IMPORTED) set_target_properties(XROOTD::PRIVATE PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${XROOTD_PRIVATE_INCLUDE_DIR}" INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${XROOTD_PRIVATE_INCLUDE_DIR}") add_library(XROOTD::SERVER UNKNOWN IMPORTED) set_target_properties(XROOTD::SERVER PROPERTIES IMPORTED_LOCATION "${XROOTD_SERVER_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${XROOTD_INCLUDE_DIR}" INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${XROOTD_INCLUDE_DIR}") add_library(XROOTD::CL UNKNOWN IMPORTED) set_target_properties(XROOTD::CL PROPERTIES IMPORTED_LOCATION "${XROOTD_CL_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${XROOTD_INCLUDE_DIR}" INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${XROOTD_INCLUDE_DIR}") add_library(XROOTD::UTILS UNKNOWN IMPORTED) set_target_properties(XROOTD::UTILS PROPERTIES IMPORTED_LOCATION 
"${XROOTD_UTILS_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${XROOTD_INCLUDE_DIR}" INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${XROOTD_INCLUDE_DIR}") add_library(XROOTD::HTTP UNKNOWN IMPORTED) set_target_properties(XROOTD::HTTP PROPERTIES IMPORTED_LOCATION "${XROOTD_HTTP_UTILS_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${XROOTD_INCLUDE_DIR}" INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${XROOTD_INCLUDE_DIR}") add_library(XROOTD::SSI UNKNOWN IMPORTED) set_target_properties(XROOTD::SSI PROPERTIES IMPORTED_LOCATION "${XROOTD_SSI_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${XROOTD_INCLUDE_DIR}" INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${XROOTD_INCLUDE_DIR}") add_library(XROOTD::POSIX UNKNOWN IMPORTED) set_target_properties(XROOTD::POSIX PROPERTIES IMPORTED_LOCATION "${XROOTD_POSIX_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${XROOTD_INCLUDE_DIR}" INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${XROOTD_INCLUDE_DIR}") unset(XROOTD_INCLUDE_DIR) unset(XROOTD_PRIVATE_INCLUDE_DIR) endif () ================================================ FILE: cmake/FindZMQ.cmake ================================================ # Try to find ZMQ # Once done, this will define # # ZMQ_FOUND - system has ZMQ # ZMQ_INCLUDE_DIRS - ZMQ include directories # ZMQ_CPP_INCLUDE_DIR - ZMQ CPP binding i.e. 
# zmq.hpp
# ZMQ_LIBRARIES       - libraries needed to use ZMQ
#
# and the following imported targets
#
# ZMQ::ZMQ

find_path(ZMQ_INCLUDE_DIR
  NAMES zmq.h
  HINTS ${ZMQ_ROOT}
  PATH_SUFFIXES include)

find_path(ZMQ_CPP_INCLUDE_DIR
  NAMES zmq.hpp
  HINTS ${ZMQ_ROOT} ${CMAKE_SOURCE_DIR}/common/
  PATH_SUFFIXES cppzmq)

find_library(ZMQ_LIBRARY
  NAMES zmq
  HINTS ${ZMQ_ROOT}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(ZMQ
  REQUIRED_VARS ZMQ_LIBRARY ZMQ_INCLUDE_DIR ZMQ_CPP_INCLUDE_DIR)
mark_as_advanced(ZMQ_LIBRARY ZMQ_INCLUDE_DIR ZMQ_CPP_INCLUDE_DIR)

if (ZMQ_FOUND AND NOT TARGET ZMQ::ZMQ)
  add_library(ZMQ::ZMQ UNKNOWN IMPORTED)
  set_target_properties(ZMQ::ZMQ PROPERTIES
    IMPORTED_LOCATION "${ZMQ_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${ZMQ_INCLUDE_DIR};${ZMQ_CPP_INCLUDE_DIR}")
  message(STATUS "ZMQ_CPP_INCLUDE_DIR=${ZMQ_CPP_INCLUDE_DIR}")

  # Set variable in case we are using our own ZMQ C++ bindings.
  # NOTE(review): the bundled bindings are searched under
  # ${CMAKE_SOURCE_DIR}/common/ above, yet this compares against
  # ${CMAKE_SOURCE_DIR}/utils -- confirm which path is intended.
  if(NOT "${ZMQ_CPP_INCLUDE_DIR}" STREQUAL "${CMAKE_SOURCE_DIR}/utils")
    set_target_properties(ZMQ::ZMQ PROPERTIES
      INTERFACE_COMPILE_DEFINITIONS HAVE_DEFAULT_ZMQ)
  endif()
endif()

set(ZMQ_INCLUDE_DIRS ${ZMQ_CPP_INCLUDE_DIR})
set(ZMQ_LIBRARIES ${ZMQ_LIBRARY})
unset(ZMQ_CPP_INCLUDE_DIR)
unset(ZMQ_INCLUDE_DIR)
unset(ZMQ_LIBRARY)

================================================
FILE: cmake/Findabsl.cmake
================================================
# Try to find abseil library
# Once done, this will define
#
# ABSL_FOUND        - system has absl library
# ABSL_INCLUDE_DIRS - absl include directories
# ABSL_LIBRARIES    - libraries needed to use absl
##

find_path(ABSL_INCLUDE_DIR
  NAMES absl/base/config.h
  HINTS /opt/eos/grpc ${ABSL_ROOT}
  PATH_SUFFIXES include)

set(libraries
  absl_synchronization absl_graphcycles_internal absl_stacktrace
  absl_symbolize absl_time absl_civil_time absl_time_zone
  absl_malloc_internal absl_debugging_internal absl_demangle_internal
  absl_strings absl_int128 absl_strings_internal absl_base
  absl_spinlock_wait absl_throw_delegate
absl_raw_logging_internal absl_log_severity absl_log_internal_check_op absl_log_internal_message absl_cord_internal absl_cordz_info absl_cordz_sample_token absl_cord absl_cord_functions absl_hash absl_status absl_log_internal_nullguard absl_cordz_functions) foreach( lib ${libraries}) find_library(ABSL_${lib}_LIBRARY NAMES ${lib} HINTS /opt/eos/grpc ${ABSL_ROOT} PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) if(ABSL_${lib}_LIBRARY) set(ABSL_${lib}_FOUND 1) list(APPEND ABSL_LIBRARIES ${ABSL_${lib}_LIBRARY}) mark_as_advanced(ABSL_${lib}_LIBRARY) message(VERBOSE "ABSL_${lib}_LIBRARY") endif() endforeach() string (REPLACE ";" " " ABSL_LIBRARY "${ABSL_LIBRARIES}") include(FindPackageHandleStandardArgs) find_package_handle_standard_args(absl REQUIRED_VARS ABSL_LIBRARY ABSL_INCLUDE_DIR) mark_as_advanced(ABSL_INCLUDE_DIR ABSL_LIBRARY) message(VERBOSE "Abseil include path: ${ABSL_INCLUDE_DIR}") if (ABSL_FOUND AND NOT TARGET ABSL::ABSL) add_library(ABSL::ABSL UNKNOWN IMPORTED) set_target_properties(ABSL::ABSL PROPERTIES IMPORTED_LOCATION "${ABSL_absl_base_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${ABSL_INCLUDE_DIR}" INTERFACE_LINK_LIBRARIES "${ABSL_LIBRARIES}") include(CheckSymbolExists) # Check grpc logging function check_symbol_exists(gpr_set_log_function "${ABLS_INCLUDE_DIR}" HAVE_GRPC_LOGGING) if (HAVE_GRPC_LOGGING) message(STATUS "Grpc internal logging!") targe_compile_definitions(ABLS::ABLS PUBLIC HAVE_GRPC_LOGGING) else() message(STATUS "Grpc uses abseil logging!") endif() endif () unset(ABSL_INCLUDE_DIR) unset(ABSL_LIBRARIES) ================================================ FILE: cmake/Findbz2.cmake ================================================ # Try to find bz2 # Once done, this will define # # BZ2_FOUND - system has bz2 # BZ2_INCLUDE_DIRS - bz2 include directories # BZ2_LIBRARIES - bz2 library # # and the following imported target # # BZ2::BZ2 find_path(BZ2_INCLUDE_DIR NAMES bzlib.h HINTS ${BZ2_ROOT}) find_library(BZ2_LIBRARY NAMES bz2 HINTS ${BZ2_ROOT} PATH_SUFFIXES 
${CMAKE_INSTALL_LIBDIR}) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(bz2 DEFAULT_MSG BZ2_LIBRARY BZ2_INCLUDE_DIR) if (BZ2_FOUND AND NOT TARGET BZ2::BZ2) mark_as_advanced(BZ2_FOUND BZ2_LIBRARY BZ2_INCLUDE_DIR) add_library(BZ2::BZ2 UNKNOWN IMPORTED) set_target_properties(BZ2::BZ2 PROPERTIES IMPORTED_LOCATION "${BZ2_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${BZ2_INCLUDE_DIR}") endif() unset(BZ2_INCLUDE_DIR) unset(BZ2_LIBRARY) ================================================ FILE: cmake/Finddavix.cmake ================================================ # - Locate DAVIX library # Defines: # # DAVIX_FOUND - system has davix # # and the following imported targets # # DAVIX::DAVIX find_path(DAVIX_INCLUDE_DIR NAMES davix.hpp HINTS /usr ${DAVIX_ROOT} $ENV{DAVIX_ROOT} PATH_SUFFIXES include/davix) find_library(DAVIX_LIBRARY NAMES davix HINTS /usr ${DAVIX_ROOT} $ENV{DAVIX_ROOT} PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(davix REQUIRED_VARS DAVIX_LIBRARY DAVIX_INCLUDE_DIR) mark_as_advanced(DAVIX_FOUND DAVIX_LIBRARY DAVIX_INCLUDE_DIR) if (DAVIX_FOUND AND NOT TARGET DAVIX::DAVIX) add_library(DAVIX::DAVIX UNKNOWN IMPORTED) set_target_properties(DAVIX::DAVIX PROPERTIES IMPORTED_LOCATION "${DAVIX_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${DAVIX_INCLUDE_DIR}" INTERFACE_COMPILE_DEFINITIONS HAVE_DAVIX) else() message(WARNING "Notice: davix not found, no davix support") add_library(DAVIX::DAVIX INTERFACE IMPORTED) endif() unset(DAVIX_LIBRARY) unset(DAVIX_INCLUDE_DIR) ================================================ FILE: cmake/Findeosfolly.cmake ================================================ # Try to find eos folly. 
# Once done, this will define
#
# EOS_FOLLY_FOUND        - system has eos-folly
# EOS_FOLLY_INCLUDE_DIRS - eos-folly include directories
# EOS_FOLLY_LIBRARIES    - eos-folly library
#
# EOS_FOLLY_ROOT may be defined as a hint for where to look
#
# and the following imported targets
#
# FOLLY::FOLLY

find_path(EOSFOLLY_INCLUDE_DIR
  NAMES folly/folly-config.h
  HINTS ${EOSFOLLY_ROOT}
  PATHS /opt/eos-folly /usr/local /usr
  PATH_SUFFIXES include)

find_library(EOSFOLLY_LIBRARY
  NAMES libfolly.so
  HINTS ${EOSFOLLY_ROOT}
  PATHS /opt/eos-folly /usr/local /usr
  PATH_SUFFIXES lib lib64)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(eosfolly
  REQUIRED_VARS EOSFOLLY_LIBRARY EOSFOLLY_INCLUDE_DIR)
mark_as_advanced(EOSFOLLY_FOUND EOSFOLLY_LIBRARY EOSFOLLY_INCLUDE_DIR)

if(EOSFOLLY_FOUND AND NOT TARGET FOLLY::FOLLY)
  # Note: this target is not used for the moment as the folly lib is only
  # linked directly into qclient.
  add_library(FOLLY::FOLLY STATIC IMPORTED)
  set_target_properties(FOLLY::FOLLY PROPERTIES
    IMPORTED_LOCATION "${EOSFOLLY_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${EOSFOLLY_INCLUDE_DIR}"
    INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${EOSFOLLY_INCLUDE_DIR}"
    INTERFACE_COMPILE_DEFINITIONS HAVE_FOLLY)
endif()

# This is done to preserve compatibility with qclient
set(FOLLY_INCLUDE_DIRS ${EOSFOLLY_INCLUDE_DIR})
set(FOLLY_LIBRARIES ${EOSFOLLY_LIBRARY} glog gflags)
set(FOLLY_FOUND TRUE)
unset(EOSFOLLY_LIBRARY)
unset(EOSFOLLY_INCLUDE_DIR)

================================================
FILE: cmake/Findfuse.cmake
================================================
# Try to find fuse (devel)
# Once done, this will define
#
# FUSE_FOUND         - system has fuse
# FUSE_INCLUDE_DIRS  - fuse include directories
# FUSE_LIBRARIES     - libraries need to use fuse
# FUSE_MOUNT_VERSION - major version reported by fusermount
#
# and the following imported target
# FUSE::FUSE

find_package(PkgConfig)
pkg_check_modules(PC_fuse QUIET fuse)
set(FUSE_VERSION ${PC_fuse_VERSION})

find_path(FUSE_INCLUDE_DIR
NAMES fuse/fuse_lowlevel.h HINTS ${FUSE_ROOT} ${PC_fuse_INCLUDEDIR} ${PC_fuse_INCLUDE_DIRS} PATH_SUFFIXES include include/osxfuse) if(MacOSX) find_library(FUSE_LIBRARY NAMES osxfuse HINTS ${FUSE_ROOT} ${PC_fuse_LIBDIR} ${PC_fuse_LIBRARY_DIRS} PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) else() find_library(FUSE_LIBRARY NAMES fuse HINTS ${FUSE_ROOT} ${PC_fuse_LIBDIR} ${PC_fuse_LIBRARY_DIRS} PATH_SUFFIXES$ ${CMAKE_INSTALL_LIBDIR}) execute_process( COMMAND sh -c "fusermount --version | cut -d ' ' -f 3 | cut -d '.' -f 1,2 | sed s/'\\.'//g" OUTPUT_VARIABLE FUSE_MOUNT_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE RESULT_VARIABLE RETC) if(NOT ("${RETC}" STREQUAL "0") ) set(${FUSE_MOUNT_VERSION} "" PARENT_SCOPE) endif() message(STATUS "Setting FUSE_MOUNT_VERSION: ${FUSE_MOUNT_VERSION}") endif() include(FindPackageHandleStandardArgs) find_package_handle_standard_args(fuse REQUIRED_VARS FUSE_LIBRARY FUSE_INCLUDE_DIR VERSION_VAR FUSE_VERSION) if (FUSE_FOUND AND NOT TARGET FUSE::FUSE) mark_as_advanced(FUSE_INCLUDE_DIR FUSE_LIBRARY) add_library(FUSE::FUSE UNKNOWN IMPORTED) set_target_properties(FUSE::FUSE PROPERTIES IMPORTED_LOCATION "${FUSE_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${FUSE_INCLUDE_DIR}") endif() set(FUSE_INCLUDE_DIRS ${FUSE_INCLUDE_DIR}) set(FUSE_LIBRARIES ${FUSE_LIBRARY}) unset(FUSE_INCLDUE_DIR) unset(FUSE_LIBRARY) ================================================ FILE: cmake/Findfuse3.cmake ================================================ # Try to find fuse (devel) # Once done, this will define # # FUSE3_FOUND - system has fuse # # and the following imported target # # FUSE3::FUSE3 find_path(FUSE3_INCLUDE_DIR NAMES fuse3/fuse_lowlevel.h HINTS ${FUSE3_ROOT}) find_library(FUSE3_LIBRARY NAMES fuse3 HINTS ${FUSE3_ROOT} PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(fuse3 REQUIRED_VARS FUSE3_LIBRARY FUSE3_INCLUDE_DIR) if (FUSE3_FOUND AND NOT TARGET FUSE3::FUSE3) mark_as_advanced(FUSE3_INCLUDE_DIR FUSE3_LIBRARY) 
add_library(FUSE3::FUSE3 UNKNOWN IMPORTED) set_target_properties(FUSE3::FUSE3 PROPERTIES IMPORTED_LOCATION "${FUSE3_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${FUSE3_INCLUDE_DIR}" INTERFACE_COMPILE_DEFINITIONS "USE_FUSE3=1") else() message(WARNING "Notice: fuse3 not found, no fuse3 support") add_library(FUSE3::FUSE3 INTERFACE IMPORTED) endif() unset(FUSE3_INCLUDE_DIR) unset(FUSE3_LIBRARY) ================================================ FILE: cmake/Findglibc.cmake ================================================ # Try to find glibc-devel # Once done, this will define # # GLIBC_FOUND - system has glibc-devel # and the following imported targets # # GLIBC::DL # GLIBC::RT find_library(GLIBC_DL_LIBRARY NAMES dl HINTS ${GLIBC_ROOT} PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) find_library(GLIBC_RT_LIBRARY NAMES rt HINTS ${GLIBC_ROOT} PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) # Math library find_library(GLIBC_M_LIBRARY NAMES m HINTS ${GLIBC_ROOT} PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(glibc REQUIRED_VARS GLIBC_DL_LIBRARY GLIBC_RT_LIBRARY GLIBC_M_LIBRARY) mark_as_advanced(GLIBC_DL_LIBRARY GLIBC_RT_LIBRARY GLIBC_M_LIBRARY) if (GLIBC_FOUND AND NOT TARGET GLIBC::DL) add_library(GLIBC::DL UNKNOWN IMPORTED) set_target_properties(GLIBC::DL PROPERTIES IMPORTED_LOCATION "${GLIBC_DL_LIBRARY}") add_library(GLIBC::RT UNKNOWN IMPORTED) set_target_properties(GLIBC::RT PROPERTIES IMPORTED_LOCATION "${GLIBC_RT_LIBRARY}") add_library(GLIBC::M UNKNOWN IMPORTED) set_target_properties(GLIBC::M PROPERTIES IMPORTED_LOCATION "${GLIBC_M_LIBRARY}") endif() unset(GLIBC_DL_LIBRARY) unset(GLIBC_RT_LIBRARY) unset(GLIBC_M_LIBRARY) ================================================ FILE: cmake/Findhelp2man.cmake ================================================ # Locate help2man executable # Defines: # # HELP2MAN_FOUND - system has help2man # HELP2MAN_EXECUTABLE - help2man executable include(FindPackageHandleStandardArgs) 
find_program(HELP2MAN_EXECUTABLE
  NAMES help2man
  HINTS /usr $ENV{HELP2MAN_DIR}
  PATH_SUFFIXES bin)

find_package_handle_standard_args(help2man DEFAULT_MSG HELP2MAN_EXECUTABLE)
mark_as_advanced(HELP2MAN_EXECUTABLE)

================================================
FILE: cmake/Findisal.cmake
================================================
# Try to find libisa-l (devel)
# Once done, this will define
#
# ISAL_FOUND          - system has isa-l
# ISAL_INCLUDE_DIRS   - isa-l include directories
# ISAL_LIBRARIES      - isa-l library
# ISAL_LIBRARY_STATIC - isa-l static library
#
# and the following imported targets
#
# ISAL::ISAL
# ISAL::ISAL_STATIC

find_path(ISAL_INCLUDE_DIR
  NAMES isa-l.h
  HINTS ${ISAL_ROOT}
  PATH_SUFFIXES include)

# @note: fixed "NAME" -> "NAMES"; find_library has no NAME keyword, so the
# original also searched for a library literally called "NAME".
find_library(ISAL_LIBRARY
  NAMES isal
  HINTS ${ISAL_ROOT}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(isal
  REQUIRED_VARS ISAL_LIBRARY ISAL_INCLUDE_DIR)
mark_as_advanced(ISAL_LIBRARY ISAL_INCLUDE_DIR)

if (ISAL_FOUND AND NOT TARGET ISAL::ISAL)
  add_library(ISAL::ISAL UNKNOWN IMPORTED)
  set_target_properties(ISAL::ISAL PROPERTIES
    IMPORTED_LOCATION "${ISAL_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${ISAL_INCLUDE_DIR}")
  target_compile_definitions(ISAL::ISAL INTERFACE ISAL_FOUND)
else()
  message(WARNING "Notice: ISAL not found, no ISAL support")
  add_library(ISAL::ISAL INTERFACE IMPORTED)
endif()

unset(ISAL_INCLUDE_DIR)
unset(ISAL_LIBRARY)

================================================
FILE: cmake/Findisal_crypto.cmake
================================================
# Try to find libisa-l_crypto (devel)
# Once done, this will define
#
# ISALCRYPTO_FOUND          - system has isa-l_crypto
# ISALCRYPTO_INCLUDE_DIRS   - the isa-l_crypto include directories
# ISALCRYPTO_LIBRARIES      - isa-l_crypto libraries directories
# ISALCRYPTO_LIBRARY_STATIC - isa-l_crypto static library
#
# and the following imported targets
#
# ISAL::ISAL_CRYPTO
# ISAL::ISAL_CRYPTO_STATIC

find_path(ISAL_CRYPTO_INCLUDE_DIR
  NAMES isa-l_crypto.h
  HINTS
${ISAL_CRYPTO_ROOT} PATH_SUFFIXES include) find_library(ISAL_CRYPTO_LIBRARY NAME isal_crypto HINTS ${ISAL_CRYPTO_ROOT} PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(isal_crypto REQUIRED_VARS ISAL_CRYPTO_LIBRARY ISAL_CRYPTO_INCLUDE_DIR) mark_as_advanced(ISAL_CRYPTO_LIBRARY ISAL_CRYPTO_INCLUDE_DIR) if (ISAL_CRYPTO_FOUND AND NOT TARGET ISAL::ISAL_CRYPTO) add_library(ISAL::ISAL_CRYPTO UNKNOWN IMPORTED) set_target_properties(ISAL::ISAL_CRYPTO PROPERTIES IMPORTED_LOCATION "${ISAL_CRYPTO_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${ISAL_CRYPTO_INCLUDE_DIR}") target_compile_definitions(ISAL::ISAL_CRYPTO INTERFACE ISALCRYPTO_FOUND) else() message(WARNING "Notice: ISAL_CRYPTO not found, no ISAL_CRYPTO support") add_library(ISAL::ISAL_CRYPTO INTERFACE IMPORTED) endif() unset(ISAL_CRYPTO_INCLUDE_DIR) unset(ISAL_CRYPTO_LIBRARY) ================================================ FILE: cmake/Findjemalloc.cmake ================================================ # Try to find jemalloc # Once done, this will define # # JEMALLOC_FOUND - system has jemalloc # JEMALLOC_INCLUDE_DIRS - jemalloc include directories # JEMALLOC_LIBRARIES - libraries needed to use jemalloc # # and the following imported targets # # JEMALLOC::JEMALLOC # find_path(JEMALLOC_INCLUDE_DIR NAMES jemalloc.h HINTS ${JEMALLOC_ROOT} PATH_SUFFIXES include jemalloc) find_library(JEMALLOC_LIBRARY NAME jemalloc HINTS ${JEMALLOC_ROOT} PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(jemalloc REQUIRED_VARS JEMALLOC_LIBRARY JEMALLOC_INCLUDE_DIR) mark_as_advanced(JEMALLOC_FOUND JEMALLOC_INCLUDE_DIR JEMALLOC_LIBRARY) message(STATUS "Jemalloc include path: ${JEMALLOC_INCLUDE_DIR}") if (JEMALLOC_FOUND AND NOT TARGET JEMALLOC::JEMALLOC) add_library(JEMALLOC::JEMALLOC UNKNOWN IMPORTED) set_target_properties(JEMALLOC::JEMALLOC PROPERTIES IMPORTED_LOCATION "${JEMALLOC_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES 
"${JEMALLOC_INCLUDE_DIR}") else() message(WARNING "Notice: jemalloc not found, no jemalloc support") add_library(JEMALLOC::JEMALLOC INTERFACE IMPORTED) endif() ================================================ FILE: cmake/Findjsoncpp.cmake ================================================ # Try to find libjsoncpp # Once done, this will define # # JSONCPP_FOUND - system has jsoncpp # JSONCPP_INCLUDE_DIRS - the jsoncpp include directories # JSONCPP_LIBRARIES - libaries needed to use jsoncpp # # and the following imported target # # JSONCPP::JSONCPP find_package(PkgConfig) pkg_check_modules(PC_JSONCPP QUIET jsoncpp) set(JSONCPP_VERSION ${PC_JSONCPP_VERSION}) find_path(JSONCPP_INCLUDE_DIR NAMES json/json.h HINTS ${JSONCPP_ROOT} PATH_SUFFIXES jsoncpp ) find_library(JSONCPP_LIBRARY NAMES jsoncpp HINTS ${JSONCPP_ROOT} PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(jsoncpp REQUIRED_VARS JSONCPP_INCLUDE_DIR JSONCPP_LIBRARY VERSION_VAR JSON_VERSION) mark_as_advanced(JSONCPP_INCLUDE_DIR JSONCPP_LIBRARY) if (JSONCPP_FOUND AND NOT TARGET JSONCPP::JSONCPP) add_library(JSONCPP::JSONCPP UNKNOWN IMPORTED) set_target_properties(JSONCPP::JSONCPP PROPERTIES IMPORTED_LOCATION "${JSONCPP_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${JSONCPP_INCLUDE_DIR}") endif() set(JSONCPP_INCLUDE_DIRS ${JSONCPP_INCLUDE_DIR}) set(JSONCPP_LIBRARIES ${JSONCPP_LIBRARY}) unset(JSONCPP_INCLUDE_DIR) unset(JSONCP_LIBRARY) ================================================ FILE: cmake/Findkrb5.cmake ================================================ # Try to find Kerberos5 # Check for libkrb5.a # # KRB5_FOUND - True if Kerberos 5 libraries found. # KRB5_INCLUDE_DIR - where to find krb5.h, etc. 
# KRB5_LIBRARIES   - List of libraries needed to use krb5
#
# and the following imported targets
#
# KRB5::KRB5

find_package(PkgConfig)
pkg_check_modules(PC_krb5 QUIET krb5)

# @note: fixed the misspelled ${PC_kbr5_INCLUDE_DIRS} hint (always empty).
find_path(KRB5_INCLUDE_DIR
  NAMES krb5/krb5.h
  HINTS ${KRB5_ROOT} ${PC_krb5_INCLUDEDIR} ${PC_krb5_INCLUDE_DIRS})

find_library(KRB5_MIT_LIBRARY
  NAMES k5crypto
  HINTS ${KRB5_ROOT} ${PC_krb5_LIBDIR} ${PC_krb5_LIBRARY_DIRS})

find_library(KRB5_LIBRARY
  NAMES krb5
  HINTS ${KRB5_ROOT} ${PC_krb5_LIBDIR} ${PC_krb5_LIBRARY_DIRS})

find_program(KRB5_INIT
  NAMES kinit
  HINTS ${KRB5_ROOT} /usr/bin/ /usr/local/bin)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(krb5
  REQUIRED_VARS KRB5_LIBRARY KRB5_INCLUDE_DIR KRB5_MIT_LIBRARY)
# @note: dropped the stray KRB4_MIT_LIBRARY, a variable never defined
# anywhere in this module.
mark_as_advanced(KRB5_INCLUDE_DIR KRB5_MIT_LIBRARY)

if (KRB5_FOUND AND NOT TARGET KRB5::KRB5)
  add_library(KRB5::KRB5 UNKNOWN IMPORTED)
  set_target_properties(KRB5::KRB5 PROPERTIES
    IMPORTED_LOCATION "${KRB5_LIBRARY}"
    INTERFACE_LINK_LIBRARIES "${KRB5_MIT_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${KRB5_INCLUDE_DIR}")
endif()

# @note: fixed "KRB5_INCLUDE DIRS" (embedded space), which set a variable
# named KRB5_INCLUDE to the list "DIRS;<dir>" instead of KRB5_INCLUDE_DIRS.
set(KRB5_INCLUDE_DIRS ${KRB5_INCLUDE_DIR})
set(KRB5_LIBRARIES ${KRB5_LIBRARY} ${KRB5_MIT_LIBRARY})
unset(KRB5_INCLUDE_DIR)
unset(KRB5_LIBRARY)
unset(KRB5_MIT_LIBRARY)

================================================
FILE: cmake/Findldap.cmake
================================================
# Try to find ldap
# Once done, this will define
#
# LDAP_FOUND        - system has libldap
# LDAP_INCLUDE_DIRS - ldap include directories
# LDAP_LIBRARIES    - ldap libraries directories
#
# and the following imported target
#
# LDAP::LDAP

find_path(LDAP_INCLUDE_DIR
  NAMES ldap.h
  HINTS ${LDAP_ROOT}
  PATH_SUFFIXES include)

# @note: fixed the misspelled keyword "PATH_SUFFIEX" -> PATH_SUFFIXES; the
# typo made both tokens be treated as extra search names.
find_library(LDAP_LIBRARY
  NAMES ldap
  HINTS ${LDAP_ROOT}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(ldap
  REQUIRED_VARS LDAP_LIBRARY LDAP_INCLUDE_DIR)
mark_as_advanced(LDAP_LIBRARY LDAP_INCLUDE_DIR)

if (LDAP_FOUND AND NOT TARGET LDAP::LDAP)
  add_library(LDAP::LDAP UNKNOWN
IMPORTED) set_target_properties(LDAP::LDAP PROPERTIES IMPORTED_LOCATION "${LDAP_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${LDAP_INCLUDE_DIR}") endif() set(LDAP_INCLUDE_DIRS ${LDAP_INCLUDE_DIR}) set(LDAP_LIBRARIES ${LDAP_LIBRARY}) ================================================ FILE: cmake/Findlibbfd.cmake ================================================ # Try to find libbfd # Once done, this will define # # LIBBFD_FOUND - system has libbfd # LIBBFD_INCLUDE_DIRS - libbfd include directories # LIBBFD_LIBRARIES - libbfd library # # LIBBFD_ROOT_DIR may be defined as a hint for where to look find_path(LIBBFD_INCLUDE_DIR NAMES bfd.h HINTS /opt/rh/devtoolset-8/root ${LIBBFD_ROOT} PATH_SUFFIXES include usr/include) find_library(LIBBFD_LIBRARY NAMES bfd HINTS /opt/rh/devtoolset-8/root ${LIBBFD_ROOT} PATH_SUFFIXES lib lib64) find_library(LIBIBERTY_LIBRARY NAMES iberty HINTS /opt/rh/devtoolset-8/root ${LIBBFD_ROOT} PATH_SUFFIXES lib lib64) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(libbfd REQUIRE_VARS LIBBFD_LIBRARY LIBIBERTY_LIBRARY LIBBFD_INCLUDE_DIR) if (LIBBFD_FOUND AND NOT TARGET LIBBFD::LIBBFD) add_library(LIBBFD::LIBBFD UNKNOWN IMPORTED) set_target_properties(LIBBFD::LIBBFD PROPERTIES IMPORTED_LOCATION "${LIBBFD_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${LIBBFD_INCLUDE_DIR}") add_library(LIBBFD::IBERTY UNKNOWN IMPORTED) set_target_properties(LIBBFD::IBERTY PROPERTIES IMPORTED_LOCATION "${LIBIBERTY_LIBRARY}" INTERFACE_INCLUDE_DIRECTORIES "${LIBBFD_INCLUDE_DIR}") else() message(WARNING "Notice: libbfd not found, no libbfd support") add_library(LIBBFD::LIBBFD INTERFACE IMPORTED) add_library(LIBBFD::IBERTY INTERFACE IMPORTED) endif() unset(LIBBFD_INCLUDE_DIR) unset(LIBBFD_LIBRARY) unset(LIBIBERTY_LIBRARY) ================================================ FILE: cmake/Findlibproc2.cmake ================================================ #.rst: # Findlibproc2 # ------- # # Find libproc2 from procps 4.x. 
#
# Imported Targets
# ^^^^^^^^^^^^^^^^
#
# This module defines :prop_tgt:`IMPORTED` target:
#
# ``procps::libproc2``
#   The libproc2 library, if found.
#
# Result Variables
# ^^^^^^^^^^^^^^^^
#
# This module will set the following variables in your project:
#
# ``LIBPROC2_FOUND``
#   True if libproc2 has been found.
# ``LIBPROC2_INCLUDE_DIRS``
#   Where to find libproc2/misc.h, etc.
# ``LIBPROC2_LIBRARIES``
#   The libraries to link against to use libproc2.
# ``LIBPROC2_VERSION``
#   The version of the libproc2 library found (e.g. 4.0.2)
#

find_package(PkgConfig QUIET)

# @note: find_package() defines <PackageName>_FIND_VERSION using the exact
# package-name case, i.e. libproc2_FIND_VERSION for this module. The
# original tested the never-defined LIBPROC2_FIND_VERSION, so requested
# minimum versions were silently ignored (the libproc2_FIND_REQUIRED
# check right below already uses the correct case).
if(PKG_CONFIG_FOUND)
  if(${libproc2_FIND_REQUIRED})
    set(LIBPROC2_REQUIRED REQUIRED)
  endif()

  if(NOT DEFINED libproc2_FIND_VERSION)
    pkg_check_modules(LIBPROC2 ${LIBPROC2_REQUIRED} libproc2)
  else()
    pkg_check_modules(LIBPROC2 ${LIBPROC2_REQUIRED}
      libproc2>=${libproc2_FIND_VERSION})
  endif()

  set(LIBPROC2_LIBRARIES ${LIBPROC2_LDFLAGS})
  set(LIBPROC2_LIBRARY ${LIBPROC2_LIBRARIES})
  set(LIBPROC2_INCLUDE_DIRS ${LIBPROC2_INCLUDE_DIRS})
  set(LIBPROC2_INCLUDE_DIR ${LIBPROC2_INCLUDE_DIRS})
else ()
  if (DEFINED libproc2_FIND_VERSION)
    find_program(PS_BIN ps REQUIRED)
    message(DEBUG "info: found ps binary in ${PS_BIN}")

    if (PS_BIN)
      execute_process(COMMAND sh -c "${PS_BIN} -V | cut -d ' ' -f 4"
        OUTPUT_VARIABLE LIBPROC2_VER
        OUTPUT_STRIP_TRAILING_WHITESPACE
        RESULT_VARIABLE RETC)

      if (NOT ("${RETC}" STREQUAL "0"))
        message(FATAL_ERROR "error: failed while calling ${PS_BIN} to get version")
      endif()

      message(DEBUG "info: ps version is ${LIBPROC2_VER}")

      # @note: fixed the misspelled ${LIBPROC_FIND_VERSION} in the error
      # message below (expanded to nothing).
      if (NOT "${LIBPROC2_VER}" VERSION_GREATER_EQUAL "${libproc2_FIND_VERSION}")
        message(FATAL_ERROR "error: procps version ${LIBPROC2_VER} less than "
          "requested ${libproc2_FIND_VERSION}")
      endif()
    else ()
      message(FATAL_ERROR "error: could not find ps binary")
    endif()
  endif()

  find_path(LIBPROC2_INCLUDE_DIR
    NAMES libproc2/pids.h
    HINTS ${LIBPROC2_ROOT}
    PATH_SUFFIXES include)

  find_library(LIBPROC2_LIBRARY
    NAMES proc2
    HINTS ${LIBPROC2_ROOT}
    PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

  set(LIBPROC2_LIBRARIES
${LIBPROC2_LIBRARY}) set(LIBPROC2_INCLUDE_DIRS ${LIBPROC2_INCLUDE_DIR}) endif() include(FindPackageHandleStandardArgs) find_package_handle_standard_args(libproc2 REQUIRED_VARS LIBPROC2_LIBRARY LIBPROC2_INCLUDE_DIR) mark_as_advanced(LIBPROC2_INCLUDE_DIR LIBPROC2_LIBRARY) if(LIBPROC2_FOUND AND NOT TARGET procps::libproc2) add_library(procps::libproc2 INTERFACE IMPORTED) set_property(TARGET procps::libproc2 PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${LIBPROC2_INCLUDE_DIRS}") set_property(TARGET procps::libproc2 PROPERTY INTERFACE_LINK_LIBRARIES "${LIBPROC2_LIBRARIES}") endif() message(DEBUG "LIBPROC2_FOUND = ${LIBPROC2_FOUND}") message(DEBUG "LIBPROC2_VERSION = ${LIBPROC2_VERSION}") message(DEBUG "LIBPROC2_INCLUDE_DIRS = ${LIBPROC2_INCLUDE_DIRS}") message(DEBUG "LIBPROC2_LIBRARIES = ${LIBPROC2_LIBRARIES}") message(DEBUG "libproc2_FIND_REQUIRED = ${libproc2_FIND_REQUIRED}") ================================================ FILE: cmake/Findlibunwind.cmake ================================================ # Find the libunwind library # # LIBUNWIND_FOUND - True if libunwind was found. # LIBUNWIND_LIBRARIES - The libraries needed to use libunwind # LIBUNWIND_INCLUDE_DIR - Location of unwind.h and libunwind.h # INPUT: LIBUNWIND_ROOT - path where include + lib of libunwind installation is located FIND_PATH(LIBUNWIND_INCLUDE_DIR libunwind.h HINTS /usr/ PATH_SUFFIXES include PATHS "${LIBUNWIND_ROOT}/include" ) FIND_LIBRARY(LIBUNWIND_GENERIC_LIBRARY libunwind.a unwind HINTS /usr/ PATH_SUFFIXES lib PATHS "${LIBUNWIND_ROOT}/lib" ) if (LIBUNWIND_INCLUDE_DIR) # nothing if (LIBUNWIND_GENERIC_LIBRARY) SET(LIBUNWIND_LIBRARIES ${LIBUNWIND_GENERIC_LIBRARY}) # For some reason, we have to link to two libunwind shared object files: # one arch-specific and one not. 
if (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") SET(LIBUNWIND_ARCH "arm") elseif (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "amd64") SET(LIBUNWIND_ARCH "x86_64") elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "^i.86$") SET(LIBUNWIND_ARCH "x86") endif() if (LIBUNWIND_ARCH) FIND_LIBRARY(LIBUNWIND_SPECIFIC_LIBRARY libunwind-${LIBUNWIND_ARCH}.a "unwind-${LIBUNWIND_ARCH}" HINTS /usr/ PATH_SUFFIXES lib PATHS "${LIBUNWIND_ROOT}" ) if (NOT LIBUNWIND_SPECIFIC_LIBRARY) MESSAGE(FATAL_ERROR "failed to find unwind-${LIBUNWIND_ARCH}") endif () SET(LIBUNWIND_LIBRARIES ${LIBUNWIND_LIBRARIES} ${LIBUNWIND_SPECIFIC_LIBRARY}) endif(LIBUNWIND_ARCH) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(libunwind DEFAULT_MSG LIBUNWIND_INCLUDE_DIR) MARK_AS_ADVANCED(LIBUNWIND_LIBRARIES LIBUNWIND_INCLUDE_DIR) else (LIBUNWIND_GENERIC_LIBRARY) MESSAGE("-- Could NOT find Libunwind ") endif (LIBUNWIND_GENERIC_LIBRARY) else (LIBUNWIND_INCLUDE_DIR) MESSAGE("-- Could NOT find libunwind.h") endif (LIBUNWIND_INCLUDE_DIR) ================================================ FILE: cmake/Findlz4.cmake ================================================ # Try to find lz4 # Once done, this will define # # LZ4_FOUND - system has bz2 # LZ4_INCLUDE_DIRS - bz2 include directories # LZ4_LIBRARIES - bz2 library # # and the following imported target # # LZ4::LZ4 find_library(LZ4_LIBRARY NAMES liblz4.so.1 liblz4.1.dylib HINTS ${LZ4_ROOT} PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR}) include(FindPackageHandleStandardArgs) find_package_handle_standard_args(lz4 DEFAULT_MSG LZ4_LIBRARY) if (LZ4_FOUND AND NOT TARGET LZ4::LZ4) mark_as_advanced(LZ4_FOUND LZ4_LIBRARY) add_library(LZ4::LZ4 UNKNOWN IMPORTED) set_target_properties(LZ4::LZ4 PROPERTIES IMPORTED_LOCATION "${LZ4_LIBRARY}") endif() unset(LZ4_LIBRARY) ================================================ FILE: cmake/Findncurses.cmake ================================================ # Try to find libncurses # Once done, this will define # # 
# NCURSES_FOUND - system has libncurses
# NCURSES_INCLUDE_DIRS - libncurses include directories
# NCURSES_LIBRARIES - ncurses library
#
# and the following imported targets
#
# NCURSES::NCURSES

# pkg-config is optional here: it only seeds search hints and the version.
find_package(PkgConfig)
pkg_check_modules(PC_ncurses QUIET ncurses)
set(NCURSES_VERSION ${PC_ncurses_VERSION})

find_path(NCURSES_INCLUDE_DIR
  NAMES curses.h
  HINTS ${NCURSES_ROOT} ${PC_ncurses_INCLUDEDIR} ${PC_ncurses_INCLUDE_DIRS}
  PATH_SUFFIXES include)

find_library(NCURSES_LIBRARY
  NAMES ncurses
  HINTS ${NCURSES_ROOT} ${PC_ncurses_LIBDIR} ${PC_ncurses_LIBRARY_DIRS}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(ncurses
  REQUIRED_VARS NCURSES_LIBRARY NCURSES_INCLUDE_DIR
  VERSION_VAR NCURSES_VERSION)
mark_as_advanced(NCURSES_FOUND NCURSES_LIBRARY NCURSES_INCLUDE_DIR)

if (NCURSES_FOUND AND NOT TARGET NCURSES::NCURSES)
  add_library(NCURSES::NCURSES UNKNOWN IMPORTED)
  set_target_properties(NCURSES::NCURSES PROPERTIES
    IMPORTED_LOCATION "${NCURSES_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${NCURSES_INCLUDE_DIR}")
endif()

# Export the documented plural result variables, then drop the singular
# working variables from the current scope (cache entries persist).
set(NCURSES_INCLUDE_DIRS ${NCURSES_INCLUDE_DIR})
set(NCURSES_LIBRARIES ${NCURSES_LIBRARY})
unset(NCURSES_INCLUDE_DIR)
unset(NCURSES_LIBRARY)

================================================
FILE: cmake/Findnfs.cmake
================================================
# - Locate NFS library
# Defines:
#
#  NFS_FOUND - system has libnfs
#
# and the following imported targets
#
# NFS::NFS

find_path(NFS_INCLUDE_DIR
  NAMES nfsc/libnfs.h
  HINTS /usr ${NFS_ROOT} $ENV{NFS_ROOT}
  PATH_SUFFIXES include)

find_library(NFS_LIBRARY
  NAMES nfs
  HINTS /usr ${NFS_ROOT} $ENV{NFS_ROOT}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(nfs
  REQUIRED_VARS NFS_LIBRARY NFS_INCLUDE_DIR)
mark_as_advanced(nfs_FOUND NFS_LIBRARY NFS_INCLUDE_DIR)

if (nfs_FOUND AND NOT TARGET NFS::NFS)
  add_library(NFS::NFS UNKNOWN IMPORTED)
  set_target_properties(NFS::NFS PROPERTIES
    IMPORTED_LOCATION
    "${NFS_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${NFS_INCLUDE_DIR}"
    INTERFACE_COMPILE_DEFINITIONS HAVE_NFS)
else()
  # Fallback: consumers can still link NFS::NFS unconditionally; without
  # libnfs the target is an empty interface and HAVE_NFS stays undefined.
  # NOTE(review): if NFS::NFS already exists but nfs_FOUND is true, this
  # branch re-adds the target and errors -- confirm intended.
  message(WARNING "Notice: libnfs not found, no NFS support")
  add_library(NFS::NFS INTERFACE IMPORTED)
endif()

unset(NFS_LIBRARY)
unset(NFS_INCLUDE_DIR)

================================================
FILE: cmake/Findprocps.cmake
================================================
# Try to find procps
# Once done, this will define
#
#  PROCPS_FOUND - system has procps
#  PROCPS_INCLUDE_DIRS - procps include directories
#  PROCPS_LIBRARIES - libraries needed to use procps
#
# (result-variable comments previously said "uuid" -- copy-paste from
# Finduuid.cmake)
#
# and the following imported target
#
# PROCPS::PROCPS

find_package(PkgConfig)
pkg_check_modules(PC_procps QUIET libprocps)
set(PROCPS_VERSION ${PC_procps_VERSION})

find_path(PROCPS_INCLUDE_DIR
  NAMES readproc.h
  HINTS ${PROCPS_ROOT} ${PC_procps_INCLUDEDIR} ${PC_procps_INCLUDE_DIRS}
  PATH_SUFFIXES include proc)

find_library(PROCPS_LIBRARY
  NAMES procps
  HINTS ${PROCPS_ROOT} ${PC_procps_LIBDIR} ${PC_procps_LIBRARY_DIRS}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(procps
  REQUIRED_VARS PROCPS_LIBRARY PROCPS_INCLUDE_DIR
  VERSION_VAR PROCPS_VERSION)

if (PROCPS_FOUND AND NOT TARGET PROCPS::PROCPS)
  mark_as_advanced(PROCPS_FOUND PROCPS_INCLUDE_DIR PROCPS_LIBRARY)
  add_library(PROCPS::PROCPS UNKNOWN IMPORTED)
  set_target_properties(PROCPS::PROCPS PROPERTIES
    IMPORTED_LOCATION "${PROCPS_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${PROCPS_INCLUDE_DIR}")
endif()

unset(PROCPS_INCLUDE_DIR)
unset(PROCPS_LIBRARY)

================================================
FILE: cmake/Findreadline.cmake
================================================
# Try to find libreadline
# Once done, this will define
#
#  READLINE_FOUND - system has readline
#  READLINE_INCLUDE_DIRS - readline include directories
#  READLINE_LIBRARIES - libraries need to use readline
#
# and the following imported targets
#
# READLINE::READLINE

find_path(READLINE_INCLUDE_DIR
  NAMES
  readline/readline.h
  HINTS ${READLINE_ROOT})

find_library(READLINE_LIBRARY
  NAMES readline
  HINTS ${READLINE_ROOT}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(readline
  REQUIRED_VARS READLINE_LIBRARY READLINE_INCLUDE_DIR)
mark_as_advanced(READLINE_FOUND READLINE_LIBRARY READLINE_INCLUDE_DIR)

if (READLINE_FOUND AND NOT TARGET READLINE::READLINE)
  add_library(READLINE::READLINE UNKNOWN IMPORTED)
  set_target_properties(READLINE::READLINE PROPERTIES
    IMPORTED_LOCATION "${READLINE_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${READLINE_INCLUDE_DIR}")
endif()

# Plural result variables for consumers that do not use the imported target.
set(READLINE_INCLUDE_DIRS ${READLINE_INCLUDE_DIR})
set(READLINE_LIBRARIES ${READLINE_LIBRARY})

================================================
FILE: cmake/Finduuid.cmake
================================================
# Try to find uuid
# Once done, this will define
#
#  UUID_FOUND - system has uuid
#  UUID_INCLUDE_DIRS - uuid include directories
#  UUID_LIBRARIES - libraries needed to use uuid
#
# and the following imported target
#
# UUID::UUID

find_package(PkgConfig)
pkg_check_modules(PC_uuid QUIET uuid)
set(UUID_VERSION ${PC_uuid_VERSION})

find_path(UUID_INCLUDE_DIR
  NAMES uuid.h
  HINTS ${UUID_ROOT} ${PC_uuid_INCLUDEDIR} ${PC_uuid_INCLUDE_DIRS}
  PATH_SUFFIXES include uuid)

find_library(UUID_LIBRARY
  NAMES uuid
  HINTS ${UUID_ROOT} ${PC_uuid_LIBDIR} ${PC_uuid_LIBRARY_DIRS}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(uuid
  REQUIRED_VARS UUID_LIBRARY UUID_INCLUDE_DIR
  VERSION_VAR UUID_VERSION)

if (UUID_FOUND AND NOT TARGET UUID::UUID)
  mark_as_advanced(UUID_FOUND UUID_INCLUDE_DIR UUID_LIBRARY)
  add_library(UUID::UUID UNKNOWN IMPORTED)
  set_target_properties(UUID::UUID PROPERTIES
    IMPORTED_LOCATION "${UUID_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${UUID_INCLUDE_DIR}")
endif()

# Export plural result variables, then drop the singular working variables.
set(UUID_INCLUDE_DIRS ${UUID_INCLUDE_DIR})
set(UUID_LIBRARIES ${UUID_LIBRARY})
unset(UUID_INCLUDE_DIR)
unset(UUID_LIBRARY)
================================================
FILE: cmake/Findxfs.cmake
================================================
# Try to find xfs
# Once done, this will define
#
#  XFS_FOUND - system has xfs
#  XFS_INCLUDE_DIRS - xfs include directories
#
# and the following imported target
#
# XFS::XFS

# Header-only check: only xfs/xfs.h is required, no library.
find_path(XFS_INCLUDE_DIR
  NAMES xfs/xfs.h
  HINTS ${XFS_ROOT}
  PATH_SUFFIXES include)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(xfs DEFAULT_MSG XFS_INCLUDE_DIR)
mark_as_advanced(XFS_INCLUDE_DIR)

if (XFS_FOUND AND NOT TARGET XFS::XFS)
  add_library(XFS::XFS INTERFACE IMPORTED)
  set_target_properties(XFS::XFS PROPERTIES
    INTERFACE_INCLUDE_DIRECTORIES "${XFS_INCLUDE_DIR}")
endif()

set(XFS_INCLUDE_DIRS ${XFS_INCLUDE_DIR})
unset(XFS_INCLUDE_DIR)

================================================
FILE: cmake/Findxxhash.cmake
================================================
# Try to find libxxhash (devel)
# Once done, this will define
#
#  XXHASH_FOUND - system has xxhash
#  XXHASH_INCLUDE_DIRS - xxhash include directories
#  XXHASH_LIBRARIES - xxhash libraries directories
#  XXHASH_LIBRARY_STATIC - xxhash static library
#
# and the following imported target
#
# XXHASH::XXHASH

find_path(XXHASH_INCLUDE_DIR
  NAMES xxhash.h
  HINTS ${XXHASH_ROOT}
  PATH_SUFFIXES include)

# FIX: the keyword is NAMES, not NAME. With "NAME" the call fell back to the
# legacy signature where "NAME" itself became the library name and "xxhash"
# a search path, so the library could never be found.
find_library(XXHASH_LIBRARY
  NAMES xxhash
  HINTS ${XXHASH_ROOT}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(xxhash
  REQUIRED_VARS XXHASH_LIBRARY XXHASH_INCLUDE_DIR)
mark_as_advanced(XXHASH_LIBRARY XXHASH_INCLUDE_DIR)

if (XXHASH_FOUND AND NOT TARGET XXHASH::XXHASH)
  add_library(XXHASH::XXHASH STATIC IMPORTED)
  set_target_properties(XXHASH::XXHASH PROPERTIES
    IMPORTED_LOCATION "${XXHASH_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${XXHASH_INCLUDE_DIR}")
  target_compile_definitions(XXHASH::XXHASH INTERFACE XXHASH_FOUND)
else()
  # Fallback: keep an empty interface target so consumers can always link it.
  message(WARNING "Notice: XXHASH not found, no XXHASH support")
  add_library(XXHASH::XXHASH INTERFACE IMPORTED)
endif()
# NOTE(review): these unset the documented plural result variables, which are
# never actually set above -- confirm whether XXHASH_INCLUDE_DIR/LIBRARY were
# meant instead.
unset(XXHASH_INCLUDE_DIRS)
unset(XXHASH_LIBRARIES)

================================================
FILE: cmake/Findzstd.cmake
================================================
# Try to find zstd
# Once done, this will define
#
#  ZSTD_FOUND - system has zstd
#  ZSTD_LIBRARIES - zstd library
#
# and the following imported target
#
# ZSTD::ZSTD

find_library(ZSTD_LIBRARY
  NAMES zstd
  HINTS ${ZSTD_ROOT}
  PATH_SUFFIXES ${CMAKE_INSTALL_LIBDIR})

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(zstd DEFAULT_MSG ZSTD_LIBRARY)

if (ZSTD_FOUND AND NOT TARGET ZSTD::ZSTD)
  mark_as_advanced(ZSTD_FOUND ZSTD_LIBRARY)
  add_library(ZSTD::ZSTD UNKNOWN IMPORTED)
  set_target_properties(ZSTD::ZSTD PROPERTIES
    IMPORTED_LOCATION "${ZSTD_LIBRARY}")
endif()

unset(ZSTD_LIBRARY)

================================================
FILE: cmake/cmake_uninstall.cmake.in
================================================
# Uninstall helper: removes every file recorded in install_manifest.txt,
# honouring DESTDIR staged installs.
IF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt")
  MESSAGE(FATAL_ERROR "Cannot find install manifest: \"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"")
ENDIF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt")

FILE(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files)
# The manifest lists one installed path per line.
STRING(REGEX REPLACE "\n" ";" files "${files}")

FOREACH(file ${files})
  SET(file_to_remove "$ENV{DESTDIR}${file}")
  MESSAGE(STATUS "Uninstalling \"${file_to_remove}\"")
  # IS_SYMLINK is checked separately because EXISTS follows (possibly
  # dangling) links.
  IF(EXISTS "${file_to_remove}" OR IS_SYMLINK "${file_to_remove}")
    EXECUTE_PROCESS(
      COMMAND @CMAKE_COMMAND@ -E remove "${file_to_remove}"
      OUTPUT_VARIABLE rm_out
      RESULT_VARIABLE rm_retval
    )
    IF(NOT "${rm_retval}" STREQUAL "0")
      MESSAGE(FATAL_ERROR "Problem when removing \"${file_to_remove}\"")
    ENDIF(NOT "${rm_retval}" STREQUAL "0")
  ELSE(EXISTS "${file_to_remove}" OR IS_SYMLINK "${file_to_remove}")
    MESSAGE(STATUS "File \"${file_to_remove}\" does not exist.")
  ENDIF(EXISTS "${file_to_remove}" OR IS_SYMLINK "${file_to_remove}")
ENDFOREACH(file)

IF(DEFINED file_to_remove)
  UNSET(file_to_remove)
ENDIF()
================================================ FILE: cmake/config_spec.cmake.in ================================================ # ---------------------------------------------------------------------- # File: CMakeLists.txt # Author: Elvin-Alin Sindrilaru # ---------------------------------------------------------------------- # ************************************************************************ # * EOS - the CERN Disk Storage System * # * Copyright (C) 2011 CERN/Switzerland * # * * # * This program is free software: you can redistribute it and/or modify * # * it under the terms of the GNU General Public License as published by * # * the Free Software Foundation, either version 3 of the License, or * # * (at your option) any later version. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU General Public License for more details. * # * * # * You should have received a copy of the GNU General Public License * # * along with this program. 
# * If not, see .*
# ************************************************************************

# Variables substituted at CMake configure time; used to regenerate the RPM
# spec file from its template inside the source tree.
set(SRC_DIR @CMAKE_CURRENT_SOURCE_DIR@)
set(CPACK_PACKAGE_NAME @CPACK_PACKAGE_NAME@)
set(CPACK_PACKAGE_VERSION @CPACK_PACKAGE_VERSION@)
set(CPACK_PACKAGE_RELEASE @CPACK_PACKAGE_RELEASE@)
set(CPACK_PACKAGE_VERSION_MAJOR @CPACK_PACKAGE_VERSION_MAJOR@)
configure_file(${SRC_DIR}/eos.spec.in ${SRC_DIR}/eos.spec)

================================================
FILE: common/Assert.hh
================================================
// ----------------------------------------------------------------------
// File: Assert.hh
// Author: Georgios Bitzers - CERN
// ----------------------------------------------------------------------

/************************************************************************
 * EOS - the CERN Disk Storage System                                   *
 * Copyright (C) 2011 CERN/Switzerland                                  *
 *                                                                      *
 * This program is free software: you can redistribute it and/or modify *
 * it under the terms of the GNU General Public License as published by *
 * the Free Software Foundation, either version 3 of the License, or    *
 * (at your option) any later version.                                  *
 *                                                                      *
 * This program is distributed in the hope that it will be useful,      *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
 * GNU General Public License for more details.                         *
 *                                                                      *
 * You should have received a copy of the GNU General Public License    *
 * along with this program.
If not, see .* ************************************************************************/ #pragma once #include #define eos_assert(condition) if(!((condition))) { std::cerr << "assertion violation in " << __PRETTY_FUNCTION__ << " at " << __FILE__ << ":" << __LINE__ << ", condition is not true: " << #condition << std::endl; _exit(1); } #define SSTR(message) static_cast(std::ostringstream().flush() << message).str() #define DBG(message) std::cerr << __FILE__ << ":" << __LINE__ << " -- " << #message << " = " << message << std::endl ================================================ FILE: common/AssistedThread.hh ================================================ // ---------------------------------------------------------------------- // File: AssistedThread.hh // Author: Georgios Bitzes - CERN // ---------------------------------------------------------------------- /************************************************************************ * quarkdb - a redis-like highly available key-value store * * Copyright (C) 2019 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. 
If not, see .* ************************************************************************/ #pragma once #include #include #include #include #include #include #include // Thread name size limit from pthread_setname_np manual, while OSX allows for // 64 characters, it makes sense to keep it at 16 for portability constexpr auto THREAD_NAME_LIMIT = 15; //------------------------------------------------------------------------------ // C++ threads offer no easy way to stop a thread once it's started. Signalling // "stop" to a (potentially sleeping) background thread involves a subtle dance // involving a mutex, condition variable, and possibly an atomic. // // Doing this correctly for every thread is a huge pain, which this class // tries to alleviate. // // How to create a thread: Just like std::thread, ie // AssistedThread(&SomeClass::SomeFunction, this, some_int_value) // // The function will receive a thread assistant object as *one extra* // parameter *at the end*, for example: // // void SomeClass::SomeFunction(int some_int_value, ThreadAssistant &assistant) // // The assistant object can then be used to check if thread termination has been // requested, or sleep for a specified amount of time but wake up immediatelly // the moment termination is requested. // // A common pattern for background threads is then: // while(!assistant.terminationRequested()) { // doStuff(); // assistant.wait_for(std::chrono::seconds(1)); // } //------------------------------------------------------------------------------ class AssistedThread; //------------------------------------------------------------------------------ //! 
Class ThreadAssistant //------------------------------------------------------------------------------ class ThreadAssistant { public: void reset() { stopFlag = false; terminationCallbacks.clear(); } void requestTermination() { std::lock_guard lock(mtx); if (!stopFlag) { stopFlag = true; notifier.notify_all(); for (size_t i = 0; i < terminationCallbacks.size(); i++) { terminationCallbacks[i](); } } } void registerCallback(std::function callable) { std::lock_guard lock(mtx); terminationCallbacks.emplace_back(std::move(callable)); if (stopFlag) { //------------------------------------------------------------------------ // Careful here.. This is a race condition where thread termination has // already been requested, even though we're not done yet registering // callbacks, apparently. // // Let's simply call the callback ourselves. //------------------------------------------------------------------------ (terminationCallbacks.back())(); } } void dropCallbacks() { std::lock_guard lock(mtx); terminationCallbacks.clear(); } bool terminationRequested() { return stopFlag; } template void wait_for(T duration) { std::unique_lock lock(mtx); if (stopFlag) { return; } notifier.wait_for(lock, duration); } template void wait_until(T duration) { std::unique_lock lock(mtx); if (stopFlag) { return; } notifier.wait_until(lock, duration); } //---------------------------------------------------------------------------- // Ok, this is a bit weird: Consider an AssistedThread which "owns" or // coordinates a bunch of other threads: // // void Coordinator(ThreadAssistant &assistant) { // AssistedThread worker1( ... ); // AssistedThread worker2( ... ); // AssistedThread worker3( ... ); // // worker1.blockUntilThreadJoins(); // worker2.blockUntilThreadJoins(); // worker3.blockUntilThreadJoins(); // } // // We would like that any requests to shut down Coordinator propagate to all // workers. 
Otherwise, since Coordinator blocks waiting for the workers to // terminate, its own early termination signal would get ignored. // // propagateTerminationSignal does just this. In the above example, call: // assistant.propagateTerminationSignal(worker1); // assistant.propagateTerminationSignal(worker2); // assistant.propagateTerminationSignal(worker3); // // And the moment Coordinator is asked to terminate, all registered threads // will, too. // // NOTE: assistant object must belong to a different thread! //---------------------------------------------------------------------------- void propagateTerminationSignal(AssistedThread& thread); static void setSelfThreadName(std::string name) { #ifndef __APPLE__ pthread_setname_np(pthread_self(), name.substr(0, 15).c_str()); #endif } private: friend class AssistedThread; // Private constructor - only AssistedThread can create such an object. ThreadAssistant(bool flag) : stopFlag(flag) {} std::atomic stopFlag; std::mutex mtx; std::condition_variable notifier; std::vector> terminationCallbacks; }; class AssistedThread { public: //---------------------------------------------------------------------------- //! null constructor, no underlying thread //---------------------------------------------------------------------------- AssistedThread() : assistant(new ThreadAssistant(true)), joined(true) {} //---------------------------------------------------------------------------- // universal references, perfect forwarding, variadic template // (C++ is intensifying) //---------------------------------------------------------------------------- template AssistedThread(Args&& ... args) : assistant(new ThreadAssistant(false)), joined(false), th(std::forward(args)..., std::ref(*assistant)) {} // No assignment, no copying AssistedThread& operator=(const AssistedThread&) = delete; // Moving is allowed. 
AssistedThread(AssistedThread&& other) { assistant = std::move(other.assistant); joined = other.joined; th = std::move(other.th); other.joined = true; } template void reset(Args&& ... args) { join(); assistant.get()->reset(); joined = false; th = std::thread(std::forward(args)..., std::ref(*assistant)); } virtual ~AssistedThread() { join(); } void stop() { if (joined) { return; } assistant->requestTermination(); } void join() { if (joined) { return; } stop(); blockUntilThreadJoins(); } // Different meaning than join, which explicitly asks the thread to // terminate. Here, we simply wait until the thread exits on its own. void blockUntilThreadJoins() { if (joined) { return; } th.join(); joined = true; } void registerCallback(std::function callable) { assistant->registerCallback(std::move(callable)); } void dropCallbacks() { assistant->dropCallbacks(); } //---------------------------------------------------------------------------- //! Set thread name. Useful to have in GDB traces, for example. 
//---------------------------------------------------------------------------- void setName(const std::string& threadName) { #ifndef __APPLE__ pthread_setname_np(th.native_handle(), threadName.c_str()); #endif } private: std::unique_ptr assistant; bool joined; std::thread th; }; inline void ThreadAssistant::propagateTerminationSignal(AssistedThread& thread) { registerCallback(std::bind(&AssistedThread::stop, &thread)); } ================================================ FILE: common/Audit.cc ================================================ // ---------------------------------------------------------------------- // File: Audit.cc // Author: EOS Team - CERN // ---------------------------------------------------------------------- /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2011 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. 
If not, see .* ************************************************************************/ #include "common/Audit.hh" #include "common/Logging.hh" #include "proto/Audit.pb.h" #include #include #include #include #include #include #include #include #include #include #include #include EOSCOMMONNAMESPACE_BEGIN namespace { static inline time_t truncate_to_interval(time_t t, unsigned interval) { if (!interval) return t; return t - (t % interval); } static inline std::string format_segment_filename(time_t t) { struct tm tmval; localtime_r(&t, &tmval); char buf[64]; // audit-YYYYmmdd-HHMMSS.zst (include seconds to support sub-minute rotations) if (strftime(buf, sizeof(buf), "audit-%Y%m%d-%H%M%S.zst", &tmval) == 0) { return "audit-unknown.zst"; } return buf; } static inline bool mkdir_p(const std::string& path, mode_t mode) { if (path.empty()) return false; size_t pos = 0; do { pos = path.find('/', pos + 1); std::string sub = path.substr(0, pos); if (sub.empty()) continue; if (mkdir(sub.c_str(), mode) == -1) { if (errno == EEXIST) continue; struct stat st; if (stat(sub.c_str(), &st) == 0 && S_ISDIR(st.st_mode)) continue; return false; } } while (pos != std::string::npos); return true; } } Audit::Audit(const std::string& baseDirectory, unsigned rotationSeconds, int compressionLevel) : mBaseDir(baseDirectory) , mRotationSeconds(rotationSeconds ? 
rotationSeconds : 3600) , mCompressionLevel(compressionLevel) , mZstdCctx(nullptr) , mFd(-1) , mCurrentSegmentStart(0) , mAuditRead(false) , mAuditList(false) , mReadAuditAll(false) { // Default: audit common document-style file types for READ const char* defaults[] = { "txt", "pdf", "doc", "docx", "ppt", "pptx", "xls", "xlsx", "odt", "ods", "odp", "rtf", "csv", "json", "xml", "yaml", "yml", "md", "html", "htm" }; mReadAuditSuffixes.assign(std::begin(defaults), std::end(defaults)); } Audit::~Audit() { std::lock_guard g(mMutex); closeWriterLocked(); } void Audit::setBaseDirectory(const std::string& baseDirectory) { std::lock_guard g(mMutex); if (mBaseDir == baseDirectory) { return; } mBaseDir = baseDirectory; closeWriterLocked(); } void Audit::setReadAuditSuffixes(const std::vector& suffixes) { std::lock_guard g(mMutex); mReadAuditSuffixes.clear(); mReadAuditAll = false; for (const auto& s : suffixes) { if (s == "*") { mReadAuditAll = true; continue; } std::string ls; ls.reserve(s.size()); for (char c : s) ls.push_back(static_cast(std::tolower(static_cast(c)))); // strip leading dot if provided if (!ls.empty() && ls[0] == '.') ls.erase(ls.begin()); if (!ls.empty()) mReadAuditSuffixes.push_back(ls); } } void Audit::setReadAuditAll(bool enable) { std::lock_guard g(mMutex); mReadAuditAll = enable; } bool Audit::shouldAuditReadPath(const std::string& path) const { if (mReadAuditAll) return true; // find suffix after last '.' 
ignoring directories std::string::size_type slash = path.find_last_of('/') ; std::string::size_type dot = path.find_last_of('.'); if (dot == std::string::npos) return false; if (slash != std::string::npos && dot < slash) return false; std::string ext = path.substr(dot + 1); std::string lext; lext.reserve(ext.size()); for (char c : ext) lext.push_back(static_cast(std::tolower(static_cast(c)))); for (const auto& s : mReadAuditSuffixes) { if (lext == s) return true; } return false; } void Audit::audit(const eos::audit::AuditRecord& record) { std::string json; google::protobuf::util::JsonPrintOptions opts; opts.add_whitespace = false; opts.preserve_proto_field_names = true; auto status = google::protobuf::util::MessageToJsonString(record, &json, opts); if (!status.ok()) { eos_static_err("msg=\"failed to serialize audit record to JSON\" err=%s", status.ToString().c_str()); return; } json.push_back('\n'); const time_t now = time(nullptr); std::lock_guard g(mMutex); rotateIfNeededLocked(now); if (mFd < 0 || !mZstdCctx) { // Failed to open writer; drop record return; } ZSTD_inBuffer in = { json.data(), json.size(), 0 }; std::vector outBuf(131072); while (in.pos < in.size) { ZSTD_outBuffer out = { outBuf.data(), outBuf.size(), 0 }; size_t ret = ZSTD_compressStream2(reinterpret_cast(mZstdCctx), &out, &in, ZSTD_e_continue); if (ZSTD_isError(ret)) { eos_static_err("msg=\"zstd compress error\" code=%s", ZSTD_getErrorName(ret)); break; } if (out.pos) { ssize_t w = write(mFd, outBuf.data(), out.pos); if (w < 0) { eos_static_err("msg=\"write error\" errno=%d err=\"%s\"", errno, strerror(errno)); break; } } } // Flush buffered data so small records are visible immediately { ZSTD_inBuffer fin = { nullptr, 0, 0 }; size_t fret = 0; do { ZSTD_outBuffer out = { outBuf.data(), outBuf.size(), 0 }; fret = ZSTD_compressStream2(reinterpret_cast(mZstdCctx), &out, &fin, ZSTD_e_flush); if (ZSTD_isError(fret)) { eos_static_warning("msg=\"zstd flush error\" code=%s", ZSTD_getErrorName(fret)); 
        break;
      }

      if (out.pos) {
        (void) ::write(mFd, outBuf.data(), out.pos);
      }
    } while (fret != 0);
  }
}

//! Convenience overload: builds an AuditRecord from the individual fields
//! (identity, operation, optional before/after stats, optional single
//! attribute change and source location) and forwards it to audit(record).
void Audit::audit(eos::audit::Operation operation, const std::string& filename,
                  const eos::common::VirtualIdentity& vid,
                  const std::string& uuid, const std::string& tid,
                  const std::string& svc, const std::string& target,
                  const eos::audit::Stat* before, const eos::audit::Stat* after,
                  const std::string& attr_name, const std::string& attr_before,
                  const std::string& attr_after, const char* src_file,
                  int src_line, const char* version)
{
  eos::audit::AuditRecord rec;
  rec.set_timestamp(time(nullptr));
  rec.set_path(filename);
  rec.set_operation(operation);
  rec.set_client_ip(vid.host);

  // Account resolution order: symbolic name, then uid string, then numeric uid.
  if (vid.name.length()) {
    rec.set_account(vid.name.c_str());
  } else if (!vid.uid_string.empty()) {
    rec.set_account(vid.uid_string);
  } else {
    rec.set_account(std::to_string(vid.uid));
  }

  rec.mutable_auth()->set_mechanism(vid.prot.length() ? vid.prot.c_str() : "local");

  if (vid.gateway) {
    (*rec.mutable_auth()->mutable_attributes())["gateway"] = "1";
  }

  // Authorization reason: a valid token wins over plain uid/gid mapping.
  if (vid.token && vid.token->Valid()) {
    rec.mutable_authorization()->add_reasons("token");
  } else {
    rec.mutable_authorization()->add_reasons("uidgid");
  }

  // Skip the well-known placeholder UUID.
  if (!uuid.empty() && uuid != "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx") {
    rec.set_uuid(uuid);
  }

  if (!tid.empty()) rec.set_tid(tid);

  if (!vid.app.empty()) rec.set_app(vid.app);

  if (!svc.empty()) rec.set_svc(svc);

  if (!target.empty()) rec.set_target(target);

  if (before) rec.mutable_before()->CopyFrom(*before);

  if (after) rec.mutable_after()->CopyFrom(*after);

  if (!attr_name.empty()) {
    auto* ac = rec.add_attrs();
    ac->set_name(attr_name);
    ac->set_before(attr_before);
    ac->set_after(attr_after);
  }

  // Record "file:line@version" of the call site as the emitting software.
  if (src_file && *src_file) {
    const char* basename = strrchr(src_file, '/') ?
strrchr(src_file, '/') + 1 : src_file; std::string software = basename; if (src_line > 0) { software += ":" + std::to_string(src_line); } if (version && *version) { software += "@" + std::string(version); } rec.set_software(software); } audit(rec); } void Audit::rotateIfNeededLocked(time_t now) { const time_t seg = truncate_to_interval(now, mRotationSeconds); if (mFd >= 0 && mZstdCctx && seg == mCurrentSegmentStart) { return; } // Close current if any closeWriterLocked(); // Open new (void)openWriterLocked(seg); } bool Audit::openWriterLocked(time_t segmentStart) { if (mBaseDir.empty()) { return false; } ensureDirectoryExistsLocked(); const std::string filename = makeSegmentPath(segmentStart); mFd = ::open(filename.c_str(), O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC, 0644); if (mFd < 0) { eos_static_err("msg=\"cannot open audit file\" path=\"%s\" errno=%d err=\"%s\"", filename.c_str(), errno, strerror(errno)); return false; } mZstdCctx = ZSTD_createCCtx(); if (!mZstdCctx) { eos_static_err("msg=\"cannot create zstd context\""); ::close(mFd); mFd = -1; return false; } if (ZSTD_isError(ZSTD_CCtx_setParameter(reinterpret_cast(mZstdCctx), ZSTD_c_compressionLevel, mCompressionLevel))) { eos_static_warning("msg=\"failed to set zstd compression level\" level=%d", mCompressionLevel); } // Ensure a valid ZSTD frame header is written so readers like zstdcat don't // fail on an empty, newly-rotated file. Flush pending header with empty input. 
{ std::vector outBuf(16384); ZSTD_inBuffer in = { nullptr, 0, 0 }; ZSTD_outBuffer out = { outBuf.data(), outBuf.size(), 0 }; size_t ret = ZSTD_compressStream2(reinterpret_cast(mZstdCctx), &out, &in, ZSTD_e_flush); if (ZSTD_isError(ret)) { eos_static_warning("msg=\"zstd header flush error\" code=%s", ZSTD_getErrorName(ret)); } if (out.pos) { (void)::write(mFd, outBuf.data(), out.pos); } } // Update symlink audit.zstd -> current file (best-effort) std::string linkPath = mBaseDir + "/audit.zstd"; (void)::unlink(linkPath.c_str()); (void)::symlink(filename.c_str(), linkPath.c_str()); mCurrentSegmentStart = segmentStart; return true; } void Audit::closeWriterLocked() { if (mFd >= 0 && mZstdCctx) { std::vector outBuf(65536); ZSTD_inBuffer in = { nullptr, 0, 0 }; size_t ret = 0; do { ZSTD_outBuffer out = { outBuf.data(), outBuf.size(), 0 }; ret = ZSTD_compressStream2(reinterpret_cast(mZstdCctx), &out, &in, ZSTD_e_end); if (ZSTD_isError(ret)) { eos_static_err("msg=\"zstd endStream error\" code=%s", ZSTD_getErrorName(ret)); break; } if (out.pos) { (void)::write(mFd, outBuf.data(), out.pos); } } while (ret != 0); } if (mFd >= 0) { ::close(mFd); mFd = -1; } if (mZstdCctx) { ZSTD_freeCCtx(reinterpret_cast(mZstdCctx)); mZstdCctx = nullptr; } mCurrentSegmentStart = 0; } std::string Audit::makeSegmentPath(time_t segmentStart) const { std::ostringstream oss; oss << mBaseDir; if (!mBaseDir.empty() && mBaseDir.back() != '/') { oss << '/'; } oss << format_segment_filename(segmentStart); return oss.str(); } void Audit::ensureDirectoryExistsLocked() { struct stat st; if (stat(mBaseDir.c_str(), &st) == 0 && S_ISDIR(st.st_mode)) { return; } if (!mkdir_p(mBaseDir, 0755)) { eos_static_err("msg=\"failed to create audit directory\" dir=\"%s\" errno=%d err=\"%s\"", mBaseDir.c_str(), errno, strerror(errno)); } } EOSCOMMONNAMESPACE_END ================================================ FILE: common/Audit.hh ================================================ // 
---------------------------------------------------------------------- // File: Audit.hh // Author: EOS Team - CERN // ---------------------------------------------------------------------- /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2011 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ /** * @file Audit.hh * * @brief Audit logging interface writing JSON lines compressed with ZSTD. * */ #ifndef __EOSCOMMON_AUDIT__HH__ #define __EOSCOMMON_AUDIT__HH__ #include "common/Namespace.hh" #include "common/VirtualIdentity.hh" #include "proto/Audit.pb.h" #include #include // Forward declaration for the generated protobuf namespace eos { namespace audit { class AuditRecord; } } EOSCOMMONNAMESPACE_BEGIN /** * @class Audit * @brief Thread-safe audit logger writing newline-delimited JSON to ZSTD files * with time-based rotation (default 1 hour). 
*/ class Audit { public: /** * @brief Construct an audit logger * @param baseDirectory directory where audit files are created * @param rotationSeconds rotation interval in seconds (default 3600) * @param compressionLevel zstd compression level (default 3) */ Audit(const std::string& baseDirectory, unsigned rotationSeconds = 3600, int compressionLevel = 3); ~Audit(); Audit(const Audit&) = delete; Audit& operator=(const Audit&) = delete; Audit(Audit&&) = delete; Audit& operator=(Audit&&) = delete; /** * @brief Update base directory for output files. Triggers rotation. */ void setBaseDirectory(const std::string& baseDirectory); /** * @brief Enable/disable audit logging for specific operations * @param enable true to enable, false to disable */ void setReadAuditing(bool enable) { mAuditRead = enable; } void setListAuditing(bool enable) { mAuditList = enable; } /** * @brief Check if auditing is enabled for specific operations * @return true if enabled, false otherwise */ bool isReadAuditingEnabled() const { return mAuditRead; } bool isListAuditingEnabled() const { return mAuditList; } /** * @brief Configure which file suffixes should trigger READ auditing. * If the vector contains "*", all files are audited for READ. */ void setReadAuditSuffixes(const std::vector& suffixes); /** * @brief Convenience to enable auditing all files for READ. */ void setReadAuditAll(bool enable); /** * @brief Check if the given path matches the READ auditing suffix policy. */ bool shouldAuditReadPath(const std::string& path) const; /** * @brief Append a record to the audit log (JSON line). Thread-safe. */ void audit(const eos::audit::AuditRecord& record); /** * @brief Convenience overload to build and append an audit record. * Populates common fields from VirtualIdentity. * @param operation operation type (e.g. 
DELETE, CREATE) * @param filename affected path * @param vid caller identity (for account, client_ip, mechanism, app, token) * @param uuid unique request id * @param tid trace identifier (short token) * @param svc acting service (e.g. "mgm") * @param target optional destination path (for rename/symlink) */ void audit(eos::audit::Operation operation, const std::string& filename, const eos::common::VirtualIdentity& vid, const std::string& uuid, const std::string& tid, const std::string& svc, const std::string& target = std::string(), const eos::audit::Stat* before = nullptr, const eos::audit::Stat* after = nullptr, const std::string& attr_name = std::string(), const std::string& attr_before = std::string(), const std::string& attr_after = std::string(), const char* src_file = nullptr, int src_line = 0, const char* version = nullptr); private: void rotateIfNeededLocked(time_t now); bool openWriterLocked(time_t segmentStart); void closeWriterLocked(); std::string makeSegmentPath(time_t segmentStart) const; void ensureDirectoryExistsLocked(); std::mutex mMutex; std::string mBaseDir; unsigned mRotationSeconds; int mCompressionLevel; void* mZstdCctx; // ZSTD_CCtx* int mFd; // file descriptor for current .zst file time_t mCurrentSegmentStart; bool mAuditRead = false; bool mAuditList = false; bool mReadAuditAll = false; std::vector mReadAuditSuffixes; // lowercase suffixes without dot }; EOSCOMMONNAMESPACE_END // Helper macro to capture source location/version for audit calls #ifndef VERSION #define VERSION "unknown" #endif #endif ================================================ FILE: common/BehaviourConfig.cc ================================================ //------------------------------------------------------------------------------ //! 
@file BehaviourConfig.cc //------------------------------------------------------------------------------ /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2024 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ #include "common/BehaviourConfig.hh" EOSCOMMONNAMESPACE_BEGIN //---------------------------------------------------------------------------- // Check the accepted configuration values per behaviour //---------------------------------------------------------------------------- bool AcceptedValue(BehaviourType behaviour, const std::string& value) { if (behaviour == BehaviourType::RainMinFsidEntry) { if ((value != "on") && (value != "off")) { return false; } } return true; } //---------------------------------------------------------------------------- // Convert string to behaviour type //---------------------------------------------------------------------------- BehaviourType BehaviourConfig::ConvertStringToBehaviour(const std::string& input) { if (input == "rain_min_fsid_entry") { return BehaviourType::RainMinFsidEntry; } else if (input == "all") { return BehaviourType::All; } else { return BehaviourType::None; } } //---------------------------------------------------------------------------- //! 
Convert behaviour type to string //---------------------------------------------------------------------------- std::string BehaviourConfig::ConvertBehaviourToString(const BehaviourType& btype) { if (btype == BehaviourType::RainMinFsidEntry) { return "rain_min_fsid_entry"; } else { return "unknown"; } } //------------------------------------------------------------------------------ // Set behaviour change //------------------------------------------------------------------------------ bool BehaviourConfig::Set(BehaviourType behaviour, const std::string& value) { if (!AcceptedValue(behaviour, value)) { return false; } std::unique_lock lock(mMutex); if (value == "off") { mMapBehaviours.erase(behaviour); } else { mMapBehaviours[behaviour] = value; } return true; } //------------------------------------------------------------------------------ // Get behaviour configuration value //------------------------------------------------------------------------------ std::string BehaviourConfig::Get(const BehaviourType& behaviour) const { std::unique_lock loc(mMutex); auto it = mMapBehaviours.find(behaviour); if (it != mMapBehaviours.end()) { return it->second; } return std::string(); } //------------------------------------------------------------------------------ // Check if given behaviour exists in the map //------------------------------------------------------------------------------ bool BehaviourConfig::Exists(const BehaviourType& behaviour) const { std::unique_lock lock(mMutex); return (mMapBehaviours.find(behaviour) != mMapBehaviours.end()); } //------------------------------------------------------------------------------ // List all configured behaviours //------------------------------------------------------------------------------ std::map BehaviourConfig::List() const { std::map output; std::unique_lock lock(mMutex); for (const auto& elem : mMapBehaviours) { output[ConvertBehaviourToString(elem.first)] = elem.second; } return output; } 
//------------------------------------------------------------------------------ // Clean the given behaviour type //------------------------------------------------------------------------------ void BehaviourConfig::Clear(const BehaviourType& behaviour) { if (behaviour == BehaviourType::None) { return; } std::unique_lock lock(mMutex); if (behaviour == BehaviourType::All) { mMapBehaviours.clear(); } else { auto it = mMapBehaviours.find(behaviour); if (it != mMapBehaviours.end()) { mMapBehaviours.erase(it); } } } EOSCOMMONNAMESPACE_END ================================================ FILE: common/BehaviourConfig.hh ================================================ //------------------------------------------------------------------------------ //! @file BehaviourConfig.hh //------------------------------------------------------------------------------ /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2024 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ #include "common/Namespace.hh" #include "common/Logging.hh" #include EOSCOMMONNAMESPACE_BEGIN //! Type of supported behaviours enum struct BehaviourType { None, RainMinFsidEntry, All, }; //------------------------------------------------------------------------------ //! 
Class MgmBehaviour - object used to store the MGM behaviour changes //------------------------------------------------------------------------------ class BehaviourConfig: public eos::common::LogId { public: //---------------------------------------------------------------------------- //! Convert string to behaviour type //! //! @param input string representation //! //! @return behaviour type object //---------------------------------------------------------------------------- static BehaviourType ConvertStringToBehaviour(const std::string& input); //---------------------------------------------------------------------------- //! Convert behaviour type to string //! //! @param btype behaviour type object //! //! @return string representation //---------------------------------------------------------------------------- static std::string ConvertBehaviourToString(const BehaviourType& btype); //---------------------------------------------------------------------------- //! Constructor //---------------------------------------------------------------------------- BehaviourConfig() = default; //---------------------------------------------------------------------------- //! Destructor //---------------------------------------------------------------------------- ~BehaviourConfig() = default; //---------------------------------------------------------------------------- //! Check if there is any behaviour change //! //! @return true if behaviour changes are registered, otherwise false //---------------------------------------------------------------------------- bool IsEmpty() const { std::unique_lock lock(mMutex); return mMapBehaviours.empty(); } //---------------------------------------------------------------------------- //! Set behaviour change //! //! @param behaviour type of behaviour //! @param value configuration value //! //! 
@return true if successful, otherwise false //---------------------------------------------------------------------------- bool Set(BehaviourType behaviour, const std::string& value); //---------------------------------------------------------------------------- //! Get behaviour configuration value //! //! @param behaviour behaviour type //! //! @return string //---------------------------------------------------------------------------- std::string Get(const BehaviourType& behaviour) const; //---------------------------------------------------------------------------- //! Clean the given behaviour type //! //! @param behaviour type of behaviour //---------------------------------------------------------------------------- void Clear(const BehaviourType& behaviour); //---------------------------------------------------------------------------- //! Check if given behaviour exists in the map. We don't care about its //! configuration value in this case. //! //! @param behaviour behaviour type //! //! @return true if it exists in the map, otherwise false //---------------------------------------------------------------------------- bool Exists(const BehaviourType& behaviour) const; //---------------------------------------------------------------------------- //! List all configured behaviours //! //! @param return map of behaviours //---------------------------------------------------------------------------- std::map List() const; private: std::map mMapBehaviours; mutable std::mutex mMutex; }; EOSCOMMONNAMESPACE_END ================================================ FILE: common/BufferManager.cc ================================================ //------------------------------------------------------------------------------ //! @file BufferManager.cc //! 
@author Elvin-Alin Sindrilaru - CERN //------------------------------------------------------------------------------ /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2021 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ #include "common/BufferManager.hh" EOSCOMMONNAMESPACE_BEGIN //------------------------------------------------------------------------------ // Get the nearest power of 2 value bigger then the given input but always // greater than min //------------------------------------------------------------------------------ uint32_t GetPowerCeil(const uint32_t input, const uint32_t min) { uint32_t power = min; while (input > power) { power <<= 1; } return power; } //------------------------------------------------------------------------------ // Get amount of system memory //------------------------------------------------------------------------------ uint64_t GetSystemMemorySize() { static uint64_t total_size = 0ull; if (!total_size) { uint64_t pages = sysconf(_SC_PHYS_PAGES); uint64_t page_size = sysconf(_SC_PAGE_SIZE); total_size = pages * page_size; } return total_size; } //------------------------------------------------------------------------------ // Get OS page size aligned buffer 
//------------------------------------------------------------------------------ std::unique_ptr GetAlignedBuffer(const size_t size) { static long os_pg_size = sysconf(_SC_PAGESIZE); char* raw_buffer = nullptr; std::unique_ptr buffer ((char*) raw_buffer, [](void* ptr) { if (ptr) { free(ptr); } }); if (posix_memalign((void**) &raw_buffer, os_pg_size, size)) { return buffer; } buffer.reset(raw_buffer); return buffer; } EOSCOMMONNAMESPACE_END ================================================ FILE: common/BufferManager.hh ================================================ //------------------------------------------------------------------------------ //! @file BufferManager.hh //! @author Elvin-Alin Sindrilaru - CERN //------------------------------------------------------------------------------ /************************************************************************ * EOS - the CERN Disk Storage System * * Copyright (C) 2020 CERN/Switzerland * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see .* ************************************************************************/ #pragma once #include "common/Namespace.hh" #include "common/Logging.hh" #include "common/StringConversion.hh" #include #include #include #include #include EOSCOMMONNAMESPACE_BEGIN //------------------------------------------------------------------------------ //! Get the nearest power of 2 value bigger then the given input but always //! 
greater than given min
//!
//! @param input input value
//! @param min min power of 2 to be used!!!
//!
//! @return nearest power of 2 bigger than input
//------------------------------------------------------------------------------
uint32_t GetPowerCeil(const uint32_t input, const uint32_t min = 1024);

//------------------------------------------------------------------------------
//! Get amount of system memory
//------------------------------------------------------------------------------
uint64_t GetSystemMemorySize();

//------------------------------------------------------------------------------
//! Get OS page size aligned buffer
//!
//! @param size buffer size to be allocated
//!
//! @return unique_ptr to buffer or null if there is any error
//------------------------------------------------------------------------------
std::unique_ptr<char, void(*)(void*)> GetAlignedBuffer(const size_t size);

//------------------------------------------------------------------------------
//! Class Buffer
//------------------------------------------------------------------------------
class Buffer
{
  friend class BufferManager;
public:
  //----------------------------------------------------------------------------
  //! Constructor
  //----------------------------------------------------------------------------
  Buffer(uint64_t size):
    mCapacity(size), mLength(0ull), mData(nullptr, free)
  {
    mData = GetAlignedBuffer(mCapacity);
  }

  //----------------------------------------------------------------------------
  //! Destructor
  //----------------------------------------------------------------------------
  virtual ~Buffer() = default;

  //----------------------------------------------------------------------------
  //! Get pointer to underlying data
  //----------------------------------------------------------------------------
  inline char* GetDataPtr()
  {
    return mData.get();
  }

  uint64_t mCapacity; ///< Available size of the buffer
  uint64_t mLength;   ///< Length of the useful data
  std::unique_ptr<char, void(*)(void*)> mData; ///< Buffer holding the data
};

//------------------------------------------------------------------------------
//! Class BufferSlot
//------------------------------------------------------------------------------
class BufferSlot
{
  friend class BufferManager;
public:
  //----------------------------------------------------------------------------
  //! Constructor
  //!
  //! @param size size of buffers allocated by the current slot
  //----------------------------------------------------------------------------
  BufferSlot(uint64_t size): mNumBuffers(0), mBuffSize(size) {}

  //----------------------------------------------------------------------------
  //! Destructor
  //----------------------------------------------------------------------------
  ~BufferSlot()
  {
    std::unique_lock<std::mutex> lock(mSlotMutex);
    mAvailableBuffers.clear();
  }

  //----------------------------------------------------------------------------
  //! Move assignment operator
  //----------------------------------------------------------------------------
  BufferSlot& operator =(BufferSlot&& other) noexcept
  {
    if (this != &other) {
      mBuffSize = other.mBuffSize.load();
      mNumBuffers.store(other.mNumBuffers);
      mAvailableBuffers = other.mAvailableBuffers;
      other.mAvailableBuffers.clear();
    }

    return *this;
  }

  //----------------------------------------------------------------------------
  //! Move constructor
  //----------------------------------------------------------------------------
  BufferSlot(BufferSlot&& other) noexcept
  {
    *this = std::move(other);
  }

  //----------------------------------------------------------------------------
  //! Get buffer - reuses an available one if present, otherwise allocates
  //----------------------------------------------------------------------------
  std::pair<std::shared_ptr<Buffer>, bool> GetBuffer()
  {
    bool new_alloc = false;
    std::unique_lock<std::mutex> lock(mSlotMutex);

    if (!mAvailableBuffers.empty()) {
      auto buff = mAvailableBuffers.front();
      mAvailableBuffers.pop_front();
      return std::make_pair(buff, new_alloc);
    }

    ++mNumBuffers;
    new_alloc = true;
    return std::make_pair(std::make_shared<Buffer>(mBuffSize), new_alloc);
  }

  //----------------------------------------------------------------------------
  //! Recycle buffer object
  //!
  //! @param buffer buffer object to be recycled
  //! @param keep true if buffer is to be saved otherwise false
  //----------------------------------------------------------------------------
  void Recycle(std::shared_ptr<Buffer> buffer, bool keep)
  {
    if (keep) {
      std::unique_lock<std::mutex> lock(mSlotMutex);
      mAvailableBuffers.push_back(buffer);
    } else {
      --mNumBuffers;
    }
  }

  //----------------------------------------------------------------------------
  //! Try to pop a buffer from the list of available ones if possible
  //----------------------------------------------------------------------------
  bool Pop()
  {
    std::unique_lock<std::mutex> lock(mSlotMutex);

    if (!mAvailableBuffers.empty()) {
      mAvailableBuffers.pop_front();
      --mNumBuffers;
      return true;
    }

    return false;
  }

  //----------------------------------------------------------------------------
  //! Get size of the buffer allocated by this buffer slot
  //!
  //! @return size of the buffer allocated by this object
  //----------------------------------------------------------------------------
  uint64_t GetBufferSize() const
  {
    return mBuffSize.load();
  }

private:
  std::mutex mSlotMutex;
  std::list<std::shared_ptr<Buffer>> mAvailableBuffers;
  std::atomic<uint64_t> mNumBuffers;
  std::atomic<uint64_t> mBuffSize;
};

//------------------------------------------------------------------------------
//!
Class BufferManager
//------------------------------------------------------------------------------
class BufferManager: public eos::common::LogId
{
public:
  //----------------------------------------------------------------------------
  //! Constructor
  //!
  //! @param max_size maximum total size of allocated buffers
  //! @param slots number of slots for different buffer sizes which are power
  //!        of 2 and multiple of slot_base_size e.g. 1MB
  //!        slot 0 -> 1MB
  //!        slot 1 -> 2MB
  //!        slot 2 -> 4MB
  //!        ...
  //!        slot 6 -> 64MB
  //! @param slot_base_sz size of the blocks in the first slot
  //----------------------------------------------------------------------------
  BufferManager(uint64_t max_size = 256 * 1024 * 1024, uint32_t slots = 6,
                uint64_t slot_base_sz = 1024 * 1024):
    mMaxSize(max_size), mAllocatedSize(0ull), mNumSlots(slots),
    mSlotBaseSize(slot_base_sz)
  {
    for (uint32_t i = 0u; i <= mNumSlots; ++i) {
      mSlots.emplace_back((1 << i) * mSlotBaseSize);
    }
  }

  //----------------------------------------------------------------------------
  //! Destructor
  //----------------------------------------------------------------------------
  ~BufferManager() = default;

  //----------------------------------------------------------------------------
  //! Get buffer for the given length
  //!
  //! @param size minimum size for requested buffer
  //!
  //! @return buffer object or nullptr if the request can not be satisfied
  //----------------------------------------------------------------------------
  std::shared_ptr<Buffer> GetBuffer(uint64_t size)
  {
    // No new buffer if we already hold more than half of system memory
    if (mAllocatedSize > (GetSystemMemorySize() >> 1)) {
      return nullptr;
    }

    uint32_t slot {UINT32_MAX};

    // Find appropriate slot for the given size
    for (uint32_t i = 0; i <= mNumSlots; ++i) {
      if (size <= (mSlotBaseSize * std::pow(2, i))) {
        slot = i;
        break;
      }
    }

    // No slot big enough for the given request
    if (slot == UINT32_MAX) {
      // No buffer if size is unreasonably large > 512MB
      if (size > 512 * eos::common::MB) {
        return nullptr;
      }

      mAllocatedSize += size;
      return std::make_shared<Buffer>(size);
    }

    std::pair<std::shared_ptr<Buffer>, bool> pair = mSlots[slot].GetBuffer();

    if (pair.second) {
      mAllocatedSize += pair.first->mCapacity;
    }

    return pair.first;
  }

  //----------------------------------------------------------------------------
  //! Recycle buffer object
  //!
  //! @param buffer objec to be recycled
  //----------------------------------------------------------------------------
  void Recycle(std::shared_ptr<Buffer> buffer)
  {
    if (buffer == nullptr) {
      return;
    }

    uint32_t slot {UINT32_MAX};

    // Find appropriate slot for given buffer
    for (uint32_t i = 0; i <= mNumSlots; ++i) {
      if (buffer->mCapacity == (mSlotBaseSize * std::pow(2, i))) {
        slot = i;
        break;
      }
    }

    // Buffer larger then our biggest slot, just deallocate
    if (slot == UINT32_MAX) {
      mAllocatedSize -= buffer->mCapacity;
      buffer.reset();
      return;
    }

    uint64_t total_size {0ull};
    auto sorted_slots = GetSortedSlotSizes(total_size);
    bool keep = (total_size <= mMaxSize);

    if (!keep) {
      eos_debug("msg=\"buffer pool is full\" max_size=%s",
                eos::common::StringConversion::GetPrettySize(mMaxSize).c_str());

      // Perform clean up for rest of slots depending on their size
      for (auto it = sorted_slots.rbegin(); it != sorted_slots.rend(); ++it) {
        if (it->first > slot) {
          if (mSlots[it->first].Pop()) {
            mAllocatedSize -= mSlots[it->first].GetBufferSize();
            break;
          }
        }

        if (it->first < slot) {
          // Free the equivalent of a block from the current slot
          int free_blocks = 1 << (slot - it->first);

          while (free_blocks) {
            if (mSlots[it->first].Pop()) {
              mAllocatedSize -= mSlots[it->first].GetBufferSize();
              --free_blocks;
            } else {
              break;
            }
          }

          break;
        }
      }
    }

    mSlots[slot].Recycle(buffer, keep);

    if (!keep) {
      mAllocatedSize -= buffer->mCapacity;
    }
  }

  //----------------------------------------------------------------------------
  //! Get sorted distribution of slot sizes from smallest to biggest
  //!
  //! @param total_size compute the total size allocated so far
  //!
  //! @return sorted vector of pairs of slot ids and size of allocated buffers
  //!         for that corresponding slot
  //----------------------------------------------------------------------------
  std::vector< std::pair<uint32_t, uint64_t> >
  GetSortedSlotSizes(uint64_t& total_size) const
  {
    std::vector< std::pair<uint32_t, uint64_t> > elem;
    total_size = 0ull;

    for (uint32_t i = 0; i <= mNumSlots; ++i) {
      // FIX: use the slot's actual buffer size instead of the previously
      // hard-coded "(1 << i) * 1024 * 1024" which silently assumed the default
      // 1MB slot base size and miscounted for custom slot_base_sz values
      elem.push_back(std::make_pair(i, (mSlots[i].mNumBuffers *
                                        mSlots[i].GetBufferSize())));
      total_size += elem.rbegin()->second;
    }

    auto comparator = [](std::pair<uint32_t, uint64_t> a,
    std::pair<uint32_t, uint64_t> b) {
      return (a.second < b.second);
    };
    std::sort(elem.begin(), elem.end(), comparator);
    return elem;
  }

  //----------------------------------------------------------------------------
  //! Get number of slots handled by the current buffer manager
  //----------------------------------------------------------------------------
  uint32_t GetNumSlots() const
  {
    return mNumSlots.load();
  }

  //----------------------------------------------------------------------------
  //! Get max size of buffers stored by buffer manager
  //----------------------------------------------------------------------------
  uint64_t GetMaxSize() const
  {
    return mMaxSize.load();
  }

private:
#ifdef IN_TEST_HARNESS
public:
#endif
  std::atomic<uint64_t> mMaxSize;
  std::atomic<uint64_t> mAllocatedSize;
  std::atomic<uint32_t> mNumSlots;
  const uint64_t mSlotBaseSize;
  std::vector<BufferSlot> mSlots;
};

//------------------------------------------------------------------------------
//!
Managed buffer which is automatically recycled during destruction //------------------------------------------------------------------------------ class ManagedBuffer { public: //---------------------------------------------------------------------------- //! Constructor //---------------------------------------------------------------------------- ManagedBuffer(BufferManager& mgr, uint64_t size): mMgr(mgr) { mBuff = mMgr.GetBuffer(size); } //---------------------------------------------------------------------------- //! Get underlying buffer //---------------------------------------------------------------------------- inline std::shared_ptr GetBuffer() { return mBuff; } //---------------------------------------------------------------------------- //! Destructor //---------------------------------------------------------------------------- ~ManagedBuffer() { mMgr.Recycle(mBuff); } private: BufferManager& mMgr; std::shared_ptr mBuff; }; EOSCOMMONNAMESPACE_END ================================================ FILE: common/CLI11.hpp ================================================ #pragma once // CLI11: Version 1.8.0 // Originally designed by Henry Schreiner // https://github.com/CLIUtils/CLI11 // // This is a standalone header file generated by MakeSingleHeader.py in CLI11/scripts // from: v1.8.0 // // From LICENSE: // // CLI11 1.8 Copyright (c) 2017-2019 University of Cincinnati, developed by Henry // Schreiner under NSF AWARD 1414736. All rights reserved. // // Redistribution and use in source and binary forms of CLI11, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. 
// 3. Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON // ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Standard combined includes: #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // Verbatim copy from CLI/Version.hpp: #define CLI11_VERSION_MAJOR 1 #define CLI11_VERSION_MINOR 8 #define CLI11_VERSION_PATCH 0 #define CLI11_VERSION "1.8.0" // Verbatim copy from CLI/Macros.hpp: // The following version macro is very similar to the one in PyBind11 #if !(defined(_MSC_VER) && __cplusplus == 199711L) && !defined(__INTEL_COMPILER) #if __cplusplus >= 201402L #define CLI11_CPP14 #if __cplusplus >= 201703L #define CLI11_CPP17 #if __cplusplus > 201703L #define CLI11_CPP20 #endif #endif #endif #elif defined(_MSC_VER) && __cplusplus == 199711L // MSVC sets _MSVC_LANG rather than __cplusplus (supposedly until the standard is fully implemented) // Unless you use the /Zc:__cplusplus flag on Visual Studio 2017 15.7 Preview 3 or newer #if 
_MSVC_LANG >= 201402L #define CLI11_CPP14 #if _MSVC_LANG > 201402L && _MSC_VER >= 1910 #define CLI11_CPP17 #if __MSVC_LANG > 201703L && _MSC_VER >= 1910 #define CLI11_CPP20 #endif #endif #endif #endif #if defined(CLI11_CPP14) #define CLI11_DEPRECATED(reason) [[deprecated(reason)]] #elif defined(_MSC_VER) #define CLI11_DEPRECATED(reason) __declspec(deprecated(reason)) #else #define CLI11_DEPRECATED(reason) __attribute__((deprecated(reason))) #endif // Verbatim copy from CLI/Optional.hpp: // You can explicitly enable or disable support // by defining to 1 or 0. Extra check here to ensure it's in the stdlib too. // We nest the check for __has_include and it's usage #ifndef CLI11_STD_OPTIONAL #ifdef __has_include #if defined(CLI11_CPP17) && __has_include() #define CLI11_STD_OPTIONAL 1 #else #define CLI11_STD_OPTIONAL 0 #endif #else #define CLI11_STD_OPTIONAL 0 #endif #endif #ifndef CLI11_EXPERIMENTAL_OPTIONAL #define CLI11_EXPERIMENTAL_OPTIONAL 0 #endif #ifndef CLI11_BOOST_OPTIONAL #define CLI11_BOOST_OPTIONAL 0 #endif #if CLI11_BOOST_OPTIONAL #include #if BOOST_VERSION < 106100 #error "This boost::optional version is not supported, use 1.61 or better" #endif #endif #if CLI11_STD_OPTIONAL #include #endif #if CLI11_EXPERIMENTAL_OPTIONAL #include #endif #if CLI11_BOOST_OPTIONAL #include #include #endif // From CLI/Version.hpp: // From CLI/Macros.hpp: // From CLI/Optional.hpp: namespace CLI { #if CLI11_STD_OPTIONAL template std::istream& operator>>(std::istream& in, std::optional& val) { T v; in >> v; val = v; return in; } #endif #if CLI11_EXPERIMENTAL_OPTIONAL template std::istream& operator>>(std::istream& in, std::experimental::optional& val) { T v; in >> v; val = v; return in; } #endif #if CLI11_BOOST_OPTIONAL template std::istream& operator>>(std::istream& in, boost::optional& val) { T v; in >> v; val = v; return in; } #endif // Export the best optional to the CLI namespace #if CLI11_STD_OPTIONAL using std::optional; #elif CLI11_EXPERIMENTAL_OPTIONAL using 
std::experimental::optional; #elif CLI11_BOOST_OPTIONAL using boost::optional; #endif // This is true if any optional is found #if CLI11_STD_OPTIONAL || CLI11_EXPERIMENTAL_OPTIONAL || CLI11_BOOST_OPTIONAL #define CLI11_OPTIONAL 1 #endif } // namespace CLI // From CLI/StringTools.hpp: namespace CLI { /// Include the items in this namespace to get free conversion of enums to/from streams. /// (This is available inside CLI as well, so CLI11 will use this without a using statement). namespace enums { /// output streaming for enumerations template ::value>::type> std::ostream & operator<<(std::ostream& in, const T& item) { // make sure this is out of the detail namespace otherwise it won't be found when needed return in << static_cast::type>(item); } /// input streaming for enumerations template ::value>::type> std::istream & operator>>(std::istream& in, T& item) { typename std::underlying_type::type i; in >> i; item = static_cast(i); return in; } } // namespace enums /// Export to CLI namespace using namespace enums; namespace detail { // Based on http://stackoverflow.com/questions/236129/split-a-string-in-c /// Split a string by a delim inline std::vector split(const std::string& s, char delim) { std::vector elems; // Check to see if empty string, give consistent result if (s.empty()) { elems.emplace_back(); } else { std::stringstream ss; ss.str(s); std::string item; while (std::getline(ss, item, delim)) { elems.push_back(item); } } return elems; } /// simple utility to convert various types to a string template inline std::string as_string(const T& v) { std::ostringstream s; s << v; return s.str(); } // if the data type is already a string just forward it template ::value>::type> inline auto as_string(T && v) -> decltype(std::forward(v)) { return std::forward(v); } /// Simple function to join a string template std::string join(const T& v, std::string delim = ",") { std::ostringstream s; auto beg = std::begin(v); auto end = std::end(v); if (beg != end) { s << *beg++; 
} while (beg != end) { s << delim << *beg++; } return s.str(); } /// Simple function to join a string from processed elements template < typename T, typename Callable, typename = typename std::enable_if < !std::is_constructible::value >::type > std::string join(const T& v, Callable func, std::string delim = ",") { std::ostringstream s; auto beg = std::begin(v); auto end = std::end(v); if (beg != end) { s << func(*beg++); } while (beg != end) { s << delim << func(*beg++); } return s.str(); } /// Join a string in reverse order template std::string rjoin(const T& v, std::string delim = ",") { std::ostringstream s; for (size_t start = 0; start < v.size(); start++) { if (start > 0) { s << delim; } s << v[v.size() - start - 1]; } return s.str(); } // Based roughly on http://stackoverflow.com/questions/25829143/c-trim-whitespace-from-a-string /// Trim whitespace from left of string inline std::string& ltrim(std::string& str) { auto it = std::find_if(str.begin(), str.end(), [](char ch) { return !std::isspace(ch, std::locale()); }); str.erase(str.begin(), it); return str; } /// Trim anything from left of string inline std::string& ltrim(std::string& str, const std::string& filter) { auto it = std::find_if(str.begin(), str.end(), [&filter](char ch) { return filter.find(ch) == std::string::npos; }); str.erase(str.begin(), it); return str; } /// Trim whitespace from right of string inline std::string& rtrim(std::string& str) { auto it = std::find_if(str.rbegin(), str.rend(), [](char ch) { return !std::isspace(ch, std::locale()); }); str.erase(it.base(), str.end()); return str; } /// Trim anything from right of string inline std::string& rtrim(std::string& str, const std::string& filter) { auto it = std::find_if(str.rbegin(), str.rend(), [&filter](char ch) { return filter.find(ch) == std::string::npos; }); str.erase(it.base(), str.end()); return str; } /// Trim whitespace from string inline std::string& trim(std::string& str) { return ltrim(rtrim(str)); } /// Trim anything from 
string inline std::string& trim(std::string& str, const std::string filter) { return ltrim(rtrim(str, filter), filter); } /// Make a copy of the string and then trim it inline std::string trim_copy(const std::string& str) { std::string s = str; return trim(s); } /// Make a copy of the string and then trim it, any filter string can be used (any char in string is filtered) inline std::string trim_copy(const std::string& str, const std::string& filter) { std::string s = str; return trim(s, filter); } /// Print a two part "help" string inline std::ostream& format_help(std::ostream& out, std::string name, std::string description, size_t wid) { name = " " + name; out << std::setw(static_cast(wid)) << std::left << name; if (!description.empty()) { if (name.length() >= wid) { out << "\n" << std::setw(static_cast(wid)) << ""; } for (const char c : description) { out.put(c); if (c == '\n') { out << std::setw(static_cast(wid)) << ""; } } } out << "\n"; return out; } /// Verify the first character of an option template bool valid_first_char(T c) { return std::isalnum(c, std::locale()) || c == '_' || c == '?' || c == '@'; } /// Verify following characters of an option template bool valid_later_char(T c) { return valid_first_char(c) || c == '.' 
|| c == '-'; } /// Verify an option name inline bool valid_name_string(const std::string& str) { if (str.empty() || !valid_first_char(str[0])) { return false; } for (auto c : str.substr(1)) if (!valid_later_char(c)) { return false; } return true; } /// Verify that str consists of letters only inline bool isalpha(const std::string& str) { return std::all_of(str.begin(), str.end(), [](char c) { return std::isalpha(c, std::locale()); }); } /// Return a lower case version of a string inline std::string to_lower(std::string str) { std::transform(std::begin(str), std::end(str), std::begin(str), [](const std::string::value_type & x) { return std::tolower(x, std::locale()); }); return str; } /// remove underscores from a string inline std::string remove_underscore(std::string str) { str.erase(std::remove(std::begin(str), std::end(str), '_'), std::end(str)); return str; } /// Find and replace a substring with another substring inline std::string find_and_replace(std::string str, std::string from, std::string to) { size_t start_pos = 0; while ((start_pos = str.find(from, start_pos)) != std::string::npos) { str.replace(start_pos, from.length(), to); start_pos += to.length(); } return str; } /// check if the flag definitions has possible false flags inline bool has_default_flag_values(const std::string& flags) { return (flags.find_first_of("{!") != std::string::npos); } inline void remove_default_flag_values(std::string& flags) { auto loc = flags.find_first_of('{'); while (loc != std::string::npos) { auto finish = flags.find_first_of("},", loc + 1); if ((finish != std::string::npos) && (flags[finish] == '}')) { flags.erase(flags.begin() + static_cast(loc), flags.begin() + static_cast(finish) + 1); } loc = flags.find_first_of('{', loc + 1); } flags.erase(std::remove(flags.begin(), flags.end(), '!'), flags.end()); } /// Check if a string is a member of a list of strings and optionally ignore case or ignore underscores inline std::ptrdiff_t find_member(std::string name, const 
std::vector names, bool ignore_case = false, bool ignore_underscore = false) { auto it = std::end(names); if (ignore_case) { if (ignore_underscore) { name = detail::to_lower(detail::remove_underscore(name)); it = std::find_if(std::begin(names), std::end(names), [&name](std::string local_name) { return detail::to_lower(detail::remove_underscore(local_name)) == name; }); } else { name = detail::to_lower(name); it = std::find_if(std::begin(names), std::end(names), [&name](std::string local_name) { return detail::to_lower(local_name) == name; }); } } else if (ignore_underscore) { name = detail::remove_underscore(name); it = std::find_if(std::begin(names), std::end(names), [&name](std::string local_name) { return detail::remove_underscore(local_name) == name; }); } else { it = std::find(std::begin(names), std::end(names), name); } return (it != std::end(names)) ? (it - std::begin(names)) : (-1); } /// Find a trigger string and call a modify callable function that takes the current string and starting position of the /// trigger and returns the position in the string to search for the next trigger string template inline std::string find_and_modify(std::string str, std::string trigger, Callable modify) { size_t start_pos = 0; while ((start_pos = str.find(trigger, start_pos)) != std::string::npos) { start_pos = modify(str, start_pos); } return str; } /// Split a string '"one two" "three"' into 'one two', 'three' /// Quote characters can be ` ' or " inline std::vector split_up(std::string str) { const std::string delims("\'\"`"); auto find_ws = [](char ch) { return std::isspace(ch, std::locale()); }; trim(str); std::vector output; bool embeddedQuote = false; char keyChar = ' '; while (!str.empty()) { if (delims.find_first_of(str[0]) != std::string::npos) { keyChar = str[0]; auto end = str.find_first_of(keyChar, 1); while ((end != std::string::npos) && (str[end - 1] == '\\')) { // deal with escaped quotes end = str.find_first_of(keyChar, end + 1); embeddedQuote = true; } if 
(end != std::string::npos) { output.push_back(str.substr(1, end - 1)); str = str.substr(end + 1); } else { output.push_back(str.substr(1)); str = ""; } } else { auto it = std::find_if(std::begin(str), std::end(str), find_ws); if (it != std::end(str)) { std::string value = std::string(str.begin(), it); output.push_back(value); str = std::string(it, str.end()); } else { output.push_back(str); str = ""; } } // transform any embedded quotes into the regular character if (embeddedQuote) { output.back() = find_and_replace(output.back(), std::string("\\") + keyChar, std::string(1, keyChar)); embeddedQuote = false; } trim(str); } return output; } /// Add a leader to the beginning of all new lines (nothing is added /// at the start of the first line). `"; "` would be for ini files /// /// Can't use Regex, or this would be a subs. inline std::string fix_newlines(std::string leader, std::string input) { std::string::size_type n = 0; while (n != std::string::npos && n < input.size()) { n = input.find('\n', n); if (n != std::string::npos) { input = input.substr(0, n + 1) + leader + input.substr(n + 1); n += leader.size(); } } return input; } /// This function detects an equal or colon followed by an escaped quote after an argument /// then modifies the string to replace the equality with a space. This is needed /// to allow the split up function to work properly and is intended to be used with the find_and_modify function /// the return value is the offset+1 which is required by the find_and_modify function. inline size_t escape_detect(std::string& str, size_t offset) { auto next = str[offset + 1]; if ((next == '\"') || (next == '\'') || (next == '`')) { auto astart = str.find_last_of("-/ \"\'`", offset - 1); if (astart != std::string::npos) { if (str[astart] == ((str[offset] == '=') ? 
'-' : '/')) { str[offset] = ' '; // interpret this as a space so the split_up works properly } } } return offset + 1; } /// Add quotes if the string contains spaces inline std::string& add_quotes_if_needed(std::string& str) { if ((str.front() != '"' && str.front() != '\'') || str.front() != str.back()) { char quote = str.find('"') < str.find('\'') ? '\'' : '"'; if (str.find(' ') != std::string::npos) { str.insert(0, 1, quote); str.append(1, quote); } } return str; } } // namespace detail } // namespace CLI // From CLI/Error.hpp: namespace CLI { // Use one of these on all error classes. // These are temporary and are undef'd at the end of this file. #define CLI11_ERROR_DEF(parent, name) \ protected: \ name(std::string ename, std::string msg, int exit_code) : parent(std::move(ename), std::move(msg), exit_code) {} \ name(std::string ename, std::string msg, ExitCodes exit_code) \ : parent(std::move(ename), std::move(msg), exit_code) {} \ \ public: \ name(std::string msg, ExitCodes exit_code) : parent(#name, std::move(msg), exit_code) {} \ name(std::string msg, int exit_code) : parent(#name, std::move(msg), exit_code) {} // This is added after the one above if a class is used directly and builds its own message #define CLI11_ERROR_SIMPLE(name) \ explicit name(std::string msg) : name(#name, msg, ExitCodes::name) {} /// These codes are part of every error in CLI. They can be obtained from e using e.exit_code or as a quick shortcut, /// int values from e.get_error_code(). enum class ExitCodes { Success = 0, IncorrectConstruction = 100, BadNameString, OptionAlreadyAdded, FileError, ConversionError, ValidationError, RequiredError, RequiresError, ExcludesError, ExtrasError, ConfigError, InvalidError, HorribleError, OptionNotFound, ArgumentMismatch, BaseClass = 127 }; // Error definitions /// @defgroup error_group Errors /// @brief Errors thrown by CLI11 /// /// These are the errors that can be thrown. Some of them, like CLI::Success, are not really errors. 
/// @{ /// All errors derive from this one class Error : public std::runtime_error { int actual_exit_code; std::string error_name{"Error"}; public: int get_exit_code() const { return actual_exit_code; } std::string get_name() const { return error_name; } Error(std::string name, std::string msg, int exit_code = static_cast(ExitCodes::BaseClass)) : runtime_error(msg), actual_exit_code(exit_code), error_name(std::move(name)) {} Error(std::string name, std::string msg, ExitCodes exit_code) : Error(name, msg, static_cast(exit_code)) {} }; // Note: Using Error::Error constructors does not work on GCC 4.7 /// Construction errors (not in parsing) class ConstructionError : public Error { CLI11_ERROR_DEF(Error, ConstructionError) }; /// Thrown when an option is set to conflicting values (non-vector and multi args, for example) class IncorrectConstruction : public ConstructionError { CLI11_ERROR_DEF(ConstructionError, IncorrectConstruction) CLI11_ERROR_SIMPLE(IncorrectConstruction) static IncorrectConstruction PositionalFlag(std::string name) { return IncorrectConstruction(name + ": Flags cannot be positional"); } static IncorrectConstruction Set0Opt(std::string name) { return IncorrectConstruction(name + ": Cannot set 0 expected, use a flag instead"); } static IncorrectConstruction SetFlag(std::string name) { return IncorrectConstruction(name + ": Cannot set an expected number for flags"); } static IncorrectConstruction ChangeNotVector(std::string name) { return IncorrectConstruction(name + ": You can only change the expected arguments for vectors"); } static IncorrectConstruction AfterMultiOpt(std::string name) { return IncorrectConstruction( name + ": You can't change expected arguments after you've changed the multi option policy!"); } static IncorrectConstruction MissingOption(std::string name) { return IncorrectConstruction("Option " + name + " is not defined"); } static IncorrectConstruction MultiOptionPolicy(std::string name) { return IncorrectConstruction(name + ": 
multi_option_policy only works for flags and exact value options"); } }; /// Thrown on construction of a bad name class BadNameString : public ConstructionError { CLI11_ERROR_DEF(ConstructionError, BadNameString) CLI11_ERROR_SIMPLE(BadNameString) static BadNameString OneCharName(std::string name) { return BadNameString("Invalid one char name: " + name); } static BadNameString BadLongName(std::string name) { return BadNameString("Bad long name: " + name); } static BadNameString DashesOnly(std::string name) { return BadNameString("Must have a name, not just dashes: " + name); } static BadNameString MultiPositionalNames(std::string name) { return BadNameString("Only one positional name allowed, remove: " + name); } }; /// Thrown when an option already exists class OptionAlreadyAdded : public ConstructionError { CLI11_ERROR_DEF(ConstructionError, OptionAlreadyAdded) explicit OptionAlreadyAdded(std::string name) : OptionAlreadyAdded(name + " is already added", ExitCodes::OptionAlreadyAdded) {} static OptionAlreadyAdded Requires(std::string name, std::string other) { return OptionAlreadyAdded(name + " requires " + other, ExitCodes::OptionAlreadyAdded); } static OptionAlreadyAdded Excludes(std::string name, std::string other) { return OptionAlreadyAdded(name + " excludes " + other, ExitCodes::OptionAlreadyAdded); } }; // Parsing errors /// Anything that can error in Parse class ParseError : public Error { CLI11_ERROR_DEF(Error, ParseError) }; // Not really "errors" /// This is a successful completion on parsing, supposed to exit class Success : public ParseError { CLI11_ERROR_DEF(ParseError, Success) Success() : Success("Successfully completed, should be caught and quit", ExitCodes::Success) {} }; /// -h or --help on command line class CallForHelp : public ParseError { CLI11_ERROR_DEF(ParseError, CallForHelp) CallForHelp() : CallForHelp("This should be caught in your main function, see examples", ExitCodes::Success) {} }; /// Usually something like --help-all on command 
line class CallForAllHelp : public ParseError { CLI11_ERROR_DEF(ParseError, CallForAllHelp) CallForAllHelp() : CallForAllHelp("This should be caught in your main function, see examples", ExitCodes::Success) {} }; /// Does not output a diagnostic in CLI11_PARSE, but allows to return from main() with a specific error code. class RuntimeError : public ParseError { CLI11_ERROR_DEF(ParseError, RuntimeError) explicit RuntimeError(int exit_code = 1) : RuntimeError("Runtime error", exit_code) {} }; /// Thrown when parsing an INI file and it is missing class FileError : public ParseError { CLI11_ERROR_DEF(ParseError, FileError) CLI11_ERROR_SIMPLE(FileError) static FileError Missing(std::string name) { return FileError(name + " was not readable (missing?)"); } }; /// Thrown when conversion call back fails, such as when an int fails to coerce to a string class ConversionError : public ParseError { CLI11_ERROR_DEF(ParseError, ConversionError) CLI11_ERROR_SIMPLE(ConversionError) ConversionError(std::string member, std::string name) : ConversionError("The value " + member + " is not an allowed value for " + name) {} ConversionError(std::string name, std::vector results) : ConversionError("Could not convert: " + name + " = " + detail::join( results)) {} static ConversionError TooManyInputsFlag(std::string name) { return ConversionError(name + ": too many inputs for a flag"); } static ConversionError TrueFalse(std::string name) { return ConversionError(name + ": Should be true/false or a number"); } }; /// Thrown when validation of results fails class ValidationError : public ParseError { CLI11_ERROR_DEF(ParseError, ValidationError) CLI11_ERROR_SIMPLE(ValidationError) explicit ValidationError(std::string name, std::string msg) : ValidationError(name + ": " + msg) {} }; /// Thrown when a required option is missing class RequiredError : public ParseError { CLI11_ERROR_DEF(ParseError, RequiredError) explicit RequiredError(std::string name) : RequiredError(name + " is required", 
ExitCodes::RequiredError) {} static RequiredError Subcommand(size_t min_subcom) { if (min_subcom == 1) { return RequiredError("A subcommand"); } else return RequiredError("Requires at least " + std::to_string( min_subcom) + " subcommands", ExitCodes::RequiredError); } static RequiredError Option(size_t min_option, size_t max_option, size_t used, const std::string& option_list) { if ((min_option == 1) && (max_option == 1) && (used == 0)) { return RequiredError("Exactly 1 option from [" + option_list + "]"); } else if ((min_option == 1) && (max_option == 1) && (used > 1)) return RequiredError("Exactly 1 option from [" + option_list + "] is required and " + std::to_string(used) + " were given", ExitCodes::RequiredError); else if ((min_option == 1) && (used == 0)) { return RequiredError("At least 1 option from [" + option_list + "]"); } else if (used < min_option) return RequiredError("Requires at least " + std::to_string( min_option) + " options used and only " + std::to_string(used) + "were given from [" + option_list + "]", ExitCodes::RequiredError); else if (max_option == 1) return RequiredError("Requires at most 1 options be given from [" + option_list + "]", ExitCodes::RequiredError); else return RequiredError("Requires at most " + std::to_string( max_option) + " options be used and " + std::to_string(used) + "were given from [" + option_list + "]", ExitCodes::RequiredError); } }; /// Thrown when the wrong number of arguments has been received class ArgumentMismatch : public ParseError { CLI11_ERROR_DEF(ParseError, ArgumentMismatch) CLI11_ERROR_SIMPLE(ArgumentMismatch) ArgumentMismatch(std::string name, int expected, size_t recieved) : ArgumentMismatch(expected > 0 ? 
("Expected exactly " + std::to_string( expected) + " arguments to " + name + ", got " + std::to_string(recieved)) : ("Expected at least " + std::to_string(-expected) + " arguments to " + name + ", got " + std::to_string(recieved)), ExitCodes::ArgumentMismatch) {} static ArgumentMismatch AtLeast(std::string name, int num) { return ArgumentMismatch(name + ": At least " + std::to_string( num) + " required"); } static ArgumentMismatch TypedAtLeast(std::string name, int num, std::string type) { return ArgumentMismatch(name + ": " + std::to_string(num) + " required " + type + " missing"); } static ArgumentMismatch FlagOverride(std::string name) { return ArgumentMismatch(name + " was given a disallowed flag override"); } }; /// Thrown when a requires option is missing class RequiresError : public ParseError { CLI11_ERROR_DEF(ParseError, RequiresError) RequiresError(std::string curname, std::string subname) : RequiresError(curname + " requires " + subname, ExitCodes::RequiresError) {} }; /// Thrown when an excludes option is present class ExcludesError : public ParseError { CLI11_ERROR_DEF(ParseError, ExcludesError) ExcludesError(std::string curname, std::string subname) : ExcludesError(curname + " excludes " + subname, ExitCodes::ExcludesError) {} }; /// Thrown when too many positionals or options are found class ExtrasError : public ParseError { CLI11_ERROR_DEF(ParseError, ExtrasError) explicit ExtrasError(std::vector args) : ExtrasError((args.size() > 1 ? 
"The following arguments were not expected: " : "The following argument was not expected: ") + detail::rjoin(args, " "), ExitCodes::ExtrasError) {} }; /// Thrown when extra values are found in an INI file class ConfigError : public ParseError { CLI11_ERROR_DEF(ParseError, ConfigError) CLI11_ERROR_SIMPLE(ConfigError) static ConfigError Extras(std::string item) { return ConfigError("INI was not able to parse " + item); } static ConfigError NotConfigurable(std::string item) { return ConfigError(item + ": This option is not allowed in a configuration file"); } }; /// Thrown when validation fails before parsing class InvalidError : public ParseError { CLI11_ERROR_DEF(ParseError, InvalidError) explicit InvalidError(std::string name) : InvalidError(name + ": Too many positional arguments with unlimited expected args", ExitCodes::InvalidError) { } }; /// This is just a safety check to verify selection and parsing match - you should not ever see it /// Strings are directly added to this error, but again, it should never be seen. class HorribleError : public ParseError { CLI11_ERROR_DEF(ParseError, HorribleError) CLI11_ERROR_SIMPLE(HorribleError) }; // After parsing /// Thrown when counting a non-existent option class OptionNotFound : public Error { CLI11_ERROR_DEF(Error, OptionNotFound) explicit OptionNotFound(std::string name) : OptionNotFound(name + " not found", ExitCodes::OptionNotFound) {} }; #undef CLI11_ERROR_DEF #undef CLI11_ERROR_SIMPLE /// @} } // namespace CLI // From CLI/TypeTools.hpp: namespace CLI { // Type tools // Utilities for type enabling namespace detail { // Based generally on https://rmf.io/cxx11/almost-static-if /// Simple empty scoped class enum class enabler {}; /// An instance to use in EnableIf constexpr enabler dummy = {}; } // namespace detail /// A copy of enable_if_t from C++14, compatible with C++11. 
/// /// We could check to see if C++14 is being used, but it does not hurt to redefine this /// (even Google does this: https://github.com/google/skia/blob/master/include/private/SkTLogic.h) /// It is not in the std namespace anyway, so no harm done. template using enable_if_t = typename std::enable_if::type; /// A copy of std::void_t from C++17 (helper for C++11 and C++14) template struct make_void { using type = void; }; /// A copy of std::void_t from C++17 - same reasoning as enable_if_t, it does not hurt to redefine template using void_t = typename make_void::type; /// A copy of std::conditional_t from C++14 - same reasoning as enable_if_t, it does not hurt to redefine template using conditional_t = typename std::conditional::type; /// Check to see if something is a vector (fail check by default) template struct is_vector : std::false_type {}; /// Check to see if something is a vector (true if actually a vector) template struct is_vector> : std::true_type {}; /// Check to see if something is bool (fail check by default) template struct is_bool : std::false_type {}; /// Check to see if something is bool (true if actually a bool) template <> struct is_bool : std::true_type {}; /// Check to see if something is a shared pointer template struct is_shared_ptr : std::false_type {}; /// Check to see if something is a shared pointer (True if really a shared pointer) template struct is_shared_ptr> : std::true_type {}; /// Check to see if something is a shared pointer (True if really a shared pointer) template struct is_shared_ptr> : std::true_type {}; /// Check to see if something is copyable pointer template struct is_copyable_ptr { static bool const value = is_shared_ptr::value || std::is_pointer::value; }; /// This can be specialized to override the type deduction for IsMember. template struct IsMemberType { using type = T; }; /// The main custom type needed here is const char * should be a string. 
template <> struct IsMemberType { using type = std::string; }; namespace detail { // These are utilities for IsMember /// Handy helper to access the element_type generically. This is not part of is_copyable_ptr because it requires that /// pointer_traits be valid. template struct element_type { using type = typename std::conditional::value, typename std::pointer_traits::element_type, T>::type; }; /// Combination of the element type and value type - remove pointer (including smart pointers) and get the value_type of /// the container template struct element_value_type { using type = typename element_type::type::value_type; }; /// Adaptor for set-like structure: This just wraps a normal container in a few utilities that do almost nothing. template struct pair_adaptor : std::false_type { using value_type = typename T::value_type; using first_type = typename std::remove_const::type; using second_type = typename std::remove_const::type; /// Get the first value (really just the underlying value) template static auto first(Q&& pair_value) -> decltype( std::forward(pair_value)) { return std::forward(pair_value); } /// Get the second value (really just the underlying value) template static auto second(Q&& pair_value) -> decltype( std::forward(pair_value)) { return std::forward(pair_value); } }; /// Adaptor for map-like structure (true version, must have key_type and mapped_type). /// This wraps a mapped container in a few utilities access it in a general way. 
// NOTE(review): corrupted CLI11 fragment (template argument lists stripped, file
// collapsed onto single lines — see upstream CLI11 for the compilable original).
// Contents: the map-like (true_type) specialization of detail::pair_adaptor
// (first/second via std::get<0>/std::get<1>), the is_streamable SFINAE probe,
// three mutually exclusive to_string overloads (string-constructible passthrough,
// ostream-streamable, otherwise empty string), the type_name overload family
// ("INT"/"UINT"/"FLOAT"/"VECTOR"/"ENUM"/"TEXT" used for help text), and
// to_flag_value, which maps flag strings (true/false/on/off/yes/no/enable/
// disable, single chars 0-9/f/n/t/y/+/-) to an int64_t, falling back to
// std::stoll and throwing std::invalid_argument on unrecognized single chars.
template struct pair_adaptor < T, conditional_t, void >> : std::true_type { using value_type = typename T::value_type; using first_type = typename std::remove_const::type; using second_type = typename std::remove_const::type; /// Get the first value (really just the underlying value) template static auto first(Q&& pair_value) -> decltype(std::get<0> (std::forward(pair_value))) { return std::get<0>(std::forward(pair_value)); } /// Get the second value (really just the underlying value) template static auto second(Q&& pair_value) -> decltype( std::get<1>(std::forward(pair_value))) { return std::get<1>(std::forward(pair_value)); } }; // Check for streamability // Based on https://stackoverflow.com/questions/22758291/how-can-i-detect-if-a-type-can-be-streamed-to-an-stdostream template class is_streamable { template static auto test(int) -> decltype(std::declval() << std::declval(), std::true_type()); template static auto test(...) -> std::false_type; public: static const bool value = decltype(test(0))::value; }; /// Convert an object to a string (directly forward if this can become a string) template ::value, detail::enabler> = detail::dummy> auto to_string(T && value) -> decltype(std::forward(value)) { return std::forward(value); } /// Convert an object to a string (streaming must be supported for that type) template < typename T, enable_if_t < !std::is_constructible::value&& is_streamable::value, detail::enabler > = detail::dummy > std::string to_string(T && value) { std::stringstream stream; stream << value; return stream.str(); } /// If conversion is not supported, return an empty string (streaming is not supported for that type) template < typename T, enable_if_t < !std::is_constructible::value&& !is_streamable::value, detail::enabler > = detail::dummy > std::string to_string(T&&) { return std::string{}; } // Type name print /// Was going to be based on /// http://stackoverflow.com/questions/1055452/c-get-name-of-type-in-template /// But this is cleaner and works 
better in this case template < typename T, enable_if_t < std::is_integral::value&& std::is_signed::value, detail::enabler > = detail::dummy > constexpr const char* type_name() { return "INT"; } template < typename T, enable_if_t < std::is_integral::value&& std::is_unsigned::value, detail::enabler > = detail::dummy > constexpr const char* type_name() { return "UINT"; } template ::value, detail::enabler> = detail::dummy> constexpr const char* type_name() { return "FLOAT"; } /// This one should not be used, since vector types print the internal type template ::value, detail::enabler> = detail::dummy> constexpr const char* type_name() { return "VECTOR"; } /// Print name for enumeration types template ::value, detail::enabler> = detail::dummy> constexpr const char* type_name() { return "ENUM"; } /// Print for all other types template < typename T, enable_if_t < !std::is_floating_point::value&& !std::is_integral::value&& !is_vector::value&& !std::is_enum::value, detail::enabler > = detail::dummy > constexpr const char* type_name() { return "TEXT"; } // Lexical cast /// Convert a flag into an integer value typically binary flags inline int64_t to_flag_value(std::string val) { static const std::string trueString("true"); static const std::string falseString("false"); if (val == trueString) { return 1; } if (val == falseString) { return -1; } val = detail::to_lower(val); int64_t ret; if (val.size() == 1) { switch (val[0]) { case '0': case 'f': case 'n': case '-': ret = -1; break; case '1': case 't': case 'y': case '+': ret = 1; break; case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': ret = val[0] - '0'; break; default: throw std::invalid_argument("unrecognized character"); } return ret; } if (val == trueString || val == "on" || val == "yes" || val == "enable") { ret = 1; } else if (val == falseString || val == "off" || val == "no" || val == "disable") { ret = -1; } else { ret = std::stoll(val); } return ret; } /// Signed integers template < 
// NOTE(review): corrupted CLI11 fragment (see upstream CLI11 for the compilable
// original; the "/// Signed integers template <" opener for the first overload
// sits at the end of the previous physical line).
// Contents: the detail::lexical_cast overload family — string -> T conversion
// returning bool success, SFINAE-dispatched on T: signed integers (std::stoll,
// base auto-detect, round-trip check), unsigned integers (rejects leading '-'
// since std::stoull silently wraps negatives, then std::stoull), bool (via
// to_flag_value, >0 means true), floats (std::stold, whole-string check),
// string-assignable (direct assign), enums (cast through underlying_type), and
// a stream-extraction fallback requiring the whole input be consumed. Also both
// sum_flag_vector overloads, which total to_flag_value over a string vector
// (unsigned clamps negative totals to T{0}), and the CLI/Split.hpp helpers
// split_short ("-x" -> name+rest) and split_long ("--name=value").
typename T, enable_if_t < std::is_integral::value&& std::is_signed::value&& !is_bool::value&& !std::is_enum::value, detail::enabler > = detail::dummy > bool lexical_cast(std::string input, T& output) { try { size_t n = 0; long long output_ll = std::stoll(input, &n, 0); output = static_cast(output_ll); return n == input.size() && static_cast(output) == output_ll; } catch (const std::invalid_argument&) { return false; } catch (const std::out_of_range&) { return false; } } /// Unsigned integers template < typename T, enable_if_t < std::is_integral::value&& std::is_unsigned::value&& !is_bool::value, detail::enabler > = detail::dummy > bool lexical_cast(std::string input, T& output) { if (!input.empty() && input.front() == '-') { return false; // std::stoull happily converts negative values to junk without any errors. } try { size_t n = 0; unsigned long long output_ll = std::stoull(input, &n, 0); output = static_cast(output_ll); return n == input.size() && static_cast(output) == output_ll; } catch (const std::invalid_argument&) { return false; } catch (const std::out_of_range&) { return false; } } /// Boolean values template ::value, detail::enabler> = detail::dummy> bool lexical_cast(std::string input, T& output) { try { auto out = to_flag_value(input); output = (out > 0); return true; } catch (const std::invalid_argument&) { return false; } } /// Floats template ::value, detail::enabler> = detail::dummy> bool lexical_cast(std::string input, T& output) { try { size_t n = 0; output = static_cast(std::stold(input, &n)); return n == input.size(); } catch (const std::invalid_argument&) { return false; } catch (const std::out_of_range&) { return false; } } /// String and similar template < typename T, enable_if_t < !std::is_floating_point::value&& !std::is_integral::value&& std::is_assignable::value, detail::enabler > = detail::dummy > bool lexical_cast(std::string input, T& output) { output = input; return true; } /// Enumerations template ::value, detail::enabler> = 
detail::dummy> bool lexical_cast(std::string input, T& output) { typename std::underlying_type::type val; bool retval = detail::lexical_cast(input, val); if (!retval) { return false; } output = static_cast(val); return true; } /// Non-string parsable template < typename T, enable_if_t < !std::is_floating_point::value&& !std::is_integral::value&& !std::is_assignable::value&& !std::is_enum::value, detail::enabler > = detail::dummy > bool lexical_cast(std::string input, T& output) { std::istringstream is; is.str(input); is >> output; return !is.fail() && !is.rdbuf()->in_avail(); } /// Sum a vector of flag representations /// The flag vector produces a series of strings in a vector, simple true is represented by a "1", simple false is by /// "-1" an if numbers are passed by some fashion they are captured as well so the function just checks for the most /// common true and false strings then uses stoll to convert the rest for summing template < typename T, enable_if_t < std::is_integral::value&& std::is_unsigned::value, detail::enabler > = detail::dummy > void sum_flag_vector(const std::vector& flags, T& output) { int64_t count{0}; for (auto& flag : flags) { count += detail::to_flag_value(flag); } output = (count > 0) ? 
static_cast(count) : T{0}; } /// Sum a vector of flag representations /// The flag vector produces a series of strings in a vector, simple true is represented by a "1", simple false is by /// "-1" an if numbers are passed by some fashion they are captured as well so the function just checks for the most /// common true and false strings then uses stoll to convert the rest for summing template < typename T, enable_if_t < std::is_integral::value&& std::is_signed::value, detail::enabler > = detail::dummy > void sum_flag_vector(const std::vector& flags, T& output) { int64_t count{0}; for (auto& flag : flags) { count += detail::to_flag_value(flag); } output = static_cast(count); } } // namespace detail } // namespace CLI // From CLI/Split.hpp: namespace CLI { namespace detail { // Returns false if not a short option. Otherwise, sets opt name and rest and returns true inline bool split_short(const std::string& current, std::string& name, std::string& rest) { if (current.size() > 1 && current[0] == '-' && valid_first_char(current[1])) { name = current.substr(1, 1); rest = current.substr(2); return true; } else { return false; } } // Returns false if not a long option. Otherwise, sets opt name and other side of = and returns true inline bool split_long(const std::string& current, std::string& name, std::string& value) { if (current.size() > 2 && current.substr(0, 2) == "--" && valid_first_char(current[2])) { auto loc = current.find_first_of('='); if (loc != std::string::npos) { name = current.substr(2, loc - 2); value = current.substr(loc + 1); } else { name = current.substr(2); value = ""; } return true; } else { return false; } } // Returns false if not a windows style option. 
// NOTE(review): corrupted CLI11 fragment; the leading "Otherwise, sets opt name
// ..." words below are the tail of a "//" comment whose opener is on the
// previous physical line (the collapse broke comment/line structure).
// Contents: split_windows_style ("/name:value" parsing), split_names
// (comma-separated name list, trimmed), get_default_flag_values (extracts
// "{default}" suffixes or "!"-prefixed flags into (name, default) pairs;
// default defaults to "false"), get_names (classifies names into short names,
// long names and a single positional name, throwing BadNameString variants on
// malformed input), then CLI/ConfigFwd.hpp: ini_join (space-joined args,
// double- or single-quoted when they contain whitespace, chosen by whether the
// arg already contains a double quote) and the ConfigItem aggregate with its
// "."-joined fullname().
Otherwise, sets opt name and value and returns true inline bool split_windows_style(const std::string& current, std::string& name, std::string& value) { if (current.size() > 1 && current[0] == '/' && valid_first_char(current[1])) { auto loc = current.find_first_of(':'); if (loc != std::string::npos) { name = current.substr(1, loc - 1); value = current.substr(loc + 1); } else { name = current.substr(1); value = ""; } return true; } else { return false; } } // Splits a string into multiple long and short names inline std::vector split_names(std::string current) { std::vector output; size_t val; while ((val = current.find(",")) != std::string::npos) { output.push_back(trim_copy(current.substr(0, val))); current = current.substr(val + 1); } output.push_back(trim_copy(current)); return output; } /// extract default flag values either {def} or starting with a ! inline std::vector> get_default_flag_values( const std::string& str) { std::vector flags = split_names(str); flags.erase(std::remove_if(flags.begin(), flags.end(), [](const std::string & name) { return ((name.empty()) || (!(((name.find_first_of('{') != std::string::npos) && (name.back() == '}')) || (name[0] == '!')))); }), flags.end()); std::vector> output; output.reserve(flags.size()); for (auto& flag : flags) { auto def_start = flag.find_first_of('{'); std::string defval = "false"; if ((def_start != std::string::npos) && (flag.back() == '}')) { defval = flag.substr(def_start + 1); defval.pop_back(); flag.erase(def_start, std::string::npos); } flag.erase(0, flag.find_first_not_of("-!")); output.emplace_back(flag, defval); } return output; } /// Get a vector of short names, one of long names, and a single name inline std::tuple, std::vector, std::string> get_names(const std::vector& input) { std::vector short_names; std::vector long_names; std::string pos_name; for (std::string name : input) { if (name.length() == 0) { continue; } else if (name.length() > 1 && name[0] == '-' && name[1] != '-') { if (name.length() 
== 2 && valid_first_char(name[1])) { short_names.emplace_back(1, name[1]); } else { throw BadNameString::OneCharName(name); } } else if (name.length() > 2 && name.substr(0, 2) == "--") { name = name.substr(2); if (valid_name_string(name)) { long_names.push_back(name); } else { throw BadNameString::BadLongName(name); } } else if (name == "-" || name == "--") { throw BadNameString::DashesOnly(name); } else { if (pos_name.length() > 0) { throw BadNameString::MultiPositionalNames(name); } pos_name = name; } } return std::tuple, std::vector, std::string> ( short_names, long_names, pos_name); } } // namespace detail } // namespace CLI // From CLI/ConfigFwd.hpp: namespace CLI { class App; namespace detail { /// Comma separated join, adds quotes if needed inline std::string ini_join(std::vector args) { std::ostringstream s; size_t start = 0; for (const auto& arg : args) { if (start++ > 0) { s << " "; } auto it = std::find_if(arg.begin(), arg.end(), [](char ch) { return std::isspace(ch, std::locale()); }); if (it == arg.end()) { s << arg; } else if (arg.find_first_of('\"') == std::string::npos) { s << '\"' << arg << '\"'; } else { s << '\'' << arg << '\''; } } return s.str(); } } // namespace detail /// Holds values to load into Options struct ConfigItem { /// This is the list of parents std::vector parents; /// This is the name std::string name; /// Listing of inputs std::vector inputs; /// The list of parents and name joined by "." std::string fullname() const { std::vector tmp = parents; tmp.emplace_back(name); return detail::join(tmp, "."); } }; /// This class provides a converter for configuration files. 
// NOTE(review): corrupted CLI11 fragment (collapsed lines, stripped template
// argument lists — restore from upstream CLI11 rather than editing here).
// Contents: the abstract Config base (to_config/from_config pure virtuals,
// to_flag which throws ConversionError::TooManyInputsFlag on multi-input flags,
// from_file which throws FileError::Missing when the ifstream is not good);
// ConfigINI::from_config, an INI parser handling [section] headers, ';'
// comments, "name=value" splitting (bare names become {"ON"}), non-"default"
// sections as parents, and '.'-separated names expanded into parent lists;
// and the Validator class: desc_function_/func_ std::function members,
// active_/non_modifying_ flags, fluent setters, operator() running func_ on a
// copy when non_modifying_, operator& / operator| composing two validators'
// error strings with "(..) AND (..)" / "(..) OR (..)" (active_ is bitwise-ANDed),
// operator! inverting success/failure, the private _merge_description helper,
// and the empty CustomValidator subclass.
class Config { protected: std::vector items; public: /// Convert an app into a configuration virtual std::string to_config(const App*, bool, bool, std::string) const = 0; /// Convert a configuration into an app virtual std::vector from_config(std::istream&) const = 0; /// Get a flag value virtual std::string to_flag(const ConfigItem& item) const { if (item.inputs.size() == 1) { return item.inputs.at(0); } throw ConversionError::TooManyInputsFlag(item.fullname()); } /// Parse a config file, throw an error (ParseError:ConfigParseError or FileError) on failure std::vector from_file(const std::string& name) { std::ifstream input{name}; if (!input.good()) { throw FileError::Missing(name); } return from_config(input); } /// Virtual destructor virtual ~Config() = default; }; /// This converter works with INI files class ConfigINI : public Config { public: std::string to_config(const App*, bool default_also, bool write_description, std::string prefix) const override; std::vector from_config(std::istream& input) const override { std::string line; std::string section = "default"; std::vector output; while (getline(input, line)) { std::vector items_buffer; detail::trim(line); size_t len = line.length(); if (len > 1 && line[0] == '[' && line[len - 1] == ']') { section = line.substr(1, len - 2); } else if (len > 0 && line[0] != ';') { output.emplace_back(); ConfigItem& out = output.back(); // Find = in string, split and recombine auto pos = line.find('='); if (pos != std::string::npos) { out.name = detail::trim_copy(line.substr(0, pos)); std::string item = detail::trim_copy(line.substr(pos + 1)); items_buffer = detail::split_up(item); } else { out.name = detail::trim_copy(line); items_buffer = {"ON"}; } if (detail::to_lower(section) != "default") { out.parents = {section}; } if (out.name.find('.') != std::string::npos) { std::vector plist = detail::split(out.name, '.'); out.name = plist.back(); plist.pop_back(); out.parents.insert(out.parents.end(), plist.begin(), plist.end()); 
} out.inputs.insert(std::end(out.inputs), std::begin(items_buffer), std::end(items_buffer)); } } return output; } }; } // namespace CLI // From CLI/Validators.hpp: namespace CLI { class Option; /// @defgroup validator_group Validators /// @brief Some validators that are provided /// /// These are simple `std::string(const std::string&)` validators that are useful. They return /// a string if the validation fails. A custom struct is provided, as well, with the same user /// semantics, but with the ability to provide a new type name. /// @{ /// class Validator { protected: /// This is the description function, if empty the description_ will be used std::function desc_function_{[]() { return std::string{}; }}; /// This it the base function that is to be called. /// Returns a string error message if validation fails. std::function func_{[](std::string&) { return std::string{}; }}; /// The name for search purposes of the Validator std::string name_; /// Enable for Validator to allow it to be disabled if need be bool active_{true}; /// specify that a validator should not modify the input bool non_modifying_{false}; public: Validator() = default; /// Construct a Validator with just the description string explicit Validator(std::string validator_desc) : desc_function_([validator_desc]() { return validator_desc; }) {} // Construct Validator from basic information Validator(std::function op, std::string validator_desc, std::string validator_name = "") : desc_function_([validator_desc]() { return validator_desc; }), func_(std::move(op)), name_(std::move(validator_name)) {} /// Set the Validator operation function Validator& operation(std::function op) { func_ = std::move(op); return *this; } /// This is the required operator for a Validator - provided to help /// users (CLI11 uses the member `func` directly) std::string operator()(std::string& str) const { std::string retstring; if (active_) { if (non_modifying_) { std::string value = str; retstring = func_(value); } else { 
retstring = func_(str); } } return retstring; }; /// This is the required operator for a Validator - provided to help /// users (CLI11 uses the member `func` directly) std::string operator()(const std::string& str) const { std::string value = str; return (active_) ? func_(value) : std::string{}; }; /// Specify the type string Validator& description(std::string validator_desc) { desc_function_ = [validator_desc]() { return validator_desc; }; return *this; } /// Generate type description information for the Validator std::string get_description() const { if (active_) { return desc_function_(); } return std::string{}; } /// Specify the type string Validator& name(std::string validator_name) { name_ = std::move(validator_name); return *this; } /// Get the name of the Validator const std::string& get_name() const { return name_; } /// Specify whether the Validator is active or not Validator& active(bool active_val = true) { active_ = active_val; return *this; } /// Specify whether the Validator can be modifying or not Validator& non_modifying(bool no_modify = true) { non_modifying_ = no_modify; return *this; } /// Get a boolean if the validator is active bool get_active() const { return active_; } /// Get a boolean if the validator is allowed to modify the input returns true if it can modify the input bool get_modifying() const { return !non_modifying_; } /// Combining validators is a new validator. Type comes from left validator if function, otherwise only set if the /// same. 
Validator operator&(const Validator& other) const { Validator newval; newval._merge_description(*this, other, " AND "); // Give references (will make a copy in lambda function) const std::function& f1 = func_; const std::function& f2 = other.func_; newval.func_ = [f1, f2](std::string & input) { std::string s1 = f1(input); std::string s2 = f2(input); if (!s1.empty() && !s2.empty()) { return std::string("(") + s1 + ") AND (" + s2 + ")"; } else { return s1 + s2; } }; newval.active_ = (active_ & other.active_); return newval; } /// Combining validators is a new validator. Type comes from left validator if function, otherwise only set if the /// same. Validator operator|(const Validator& other) const { Validator newval; newval._merge_description(*this, other, " OR "); // Give references (will make a copy in lambda function) const std::function& f1 = func_; const std::function& f2 = other.func_; newval.func_ = [f1, f2](std::string & input) { std::string s1 = f1(input); std::string s2 = f2(input); if (s1.empty() || s2.empty()) { return std::string(); } else { return std::string("(") + s1 + ") OR (" + s2 + ")"; } }; newval.active_ = (active_ & other.active_); return newval; } /// Create a validator that fails when a given validator succeeds Validator operator!() const { Validator newval; const std::function& dfunc1 = desc_function_; newval.desc_function_ = [dfunc1]() { auto str = dfunc1(); return (!str.empty()) ? 
std::string("NOT ") + str : std::string{}; }; // Give references (will make a copy in lambda function) const std::function& f1 = func_; newval.func_ = [f1, dfunc1](std::string & test) -> std::string { std::string s1 = f1(test); if (s1.empty()) { return std::string("check ") + dfunc1() + " succeeded improperly"; } else return std::string{}; }; newval.active_ = active_; return newval; } private: void _merge_description(const Validator& val1, const Validator& val2, const std::string& merger) { const std::function& dfunc1 = val1.desc_function_; const std::function& dfunc2 = val2.desc_function_; desc_function_ = [ = ]() { std::string f1 = dfunc1(); std::string f2 = dfunc2(); if ((f1.empty()) || (f2.empty())) { return f1 + f2; } return std::string("(") + f1 + ")" + merger + "(" + f2 + ")"; }; } }; /// Class wrapping some of the accessors of Validator class CustomValidator : public Validator { public: }; // The implementation of the built in validators is using the Validator class; // the user is only expected to use the const (static) versions (since there's no setup). // Therefore, this is in detail. 
// NOTE(review): corrupted CLI11 fragment (collapsed lines, stripped template
// argument lists). Additionally, the "Invalid IPV4 address / must have four
// parts" string literal below is split across a physical line break by the
// collapse — restore this whole region from upstream CLI11.
// Contents: filesystem validators built on stat() (ExistingFileValidator,
// ExistingDirectoryValidator — note ExistingFileValidator reads buffer.st_mode
// even when stat() failed, as in the upstream version of this vintage —
// ExistingPathValidator, NonexistentPathValidator); IPV4Validator (4 dot-parts,
// each an int in 0..255 via lexical_cast); PositiveNumber (int >= 0) and Number
// (any double); the const global validator instances (ExistingFile,
// ExistingDirectory, ExistingPath, NonexistentPath, ValidIPV4, PositiveNumber,
// Number); Range (reject values outside [min, max]) and Bound (clamp the input
// string to [min, max] via detail::as_string); and detail helpers smart_deref
// (dereference copyable pointers, pass references through) plus the start of
// generate_set / generate_map ("{a,b,...}" / "{k->v,...}" for help text),
// sfinae_true/test_find/has_find (member-find detection) and the two basic
// search overloads (linear find_if vs member find).
namespace detail { /// Check for an existing file (returns error message if check fails) class ExistingFileValidator : public Validator { public: ExistingFileValidator() : Validator("FILE") { func_ = [](std::string & filename) { struct stat buffer; bool exist = stat(filename.c_str(), &buffer) == 0; bool is_dir = (buffer.st_mode & S_IFDIR) != 0; if (!exist) { return "File does not exist: " + filename; } else if (is_dir) { return "File is actually a directory: " + filename; } return std::string(); }; } }; /// Check for an existing directory (returns error message if check fails) class ExistingDirectoryValidator : public Validator { public: ExistingDirectoryValidator() : Validator("DIR") { func_ = [](std::string & filename) { struct stat buffer; bool exist = stat(filename.c_str(), &buffer) == 0; bool is_dir = (buffer.st_mode & S_IFDIR) != 0; if (!exist) { return "Directory does not exist: " + filename; } else if (!is_dir) { return "Directory is actually a file: " + filename; } return std::string(); }; } }; /// Check for an existing path class ExistingPathValidator : public Validator { public: ExistingPathValidator() : Validator("PATH(existing)") { func_ = [](std::string & filename) { struct stat buffer; bool const exist = stat(filename.c_str(), &buffer) == 0; if (!exist) { return "Path does not exist: " + filename; } return std::string(); }; } }; /// Check for an non-existing path class NonexistentPathValidator : public Validator { public: NonexistentPathValidator() : Validator("PATH(non-existing)") { func_ = [](std::string & filename) { struct stat buffer; bool exist = stat(filename.c_str(), &buffer) == 0; if (exist) { return "Path already exists: " + filename; } return std::string(); }; } }; /// Validate the given string is a legal ipv4 address class IPV4Validator : public Validator { public: IPV4Validator() : Validator("IPV4") { func_ = [](std::string & ip_addr) { auto result = CLI::detail::split(ip_addr, '.'); if (result.size() != 4) { return "Invalid IPV4 address 
must have four parts " + ip_addr; } int num; bool retval = true; for (const auto& var : result) { retval &= detail::lexical_cast(var, num); if (!retval) { return "Failed parsing number " + var; } if (num < 0 || num > 255) { return "Each IP number must be between 0 and 255 " + var; } } return std::string(); }; } }; /// Validate the argument is a number and greater than or equal to 0 class PositiveNumber : public Validator { public: PositiveNumber() : Validator("POSITIVE") { func_ = [](std::string & number_str) { int number; if (!detail::lexical_cast(number_str, number)) { return "Failed parsing number " + number_str; } if (number < 0) { return "Number less then 0 " + number_str; } return std::string(); }; } }; /// Validate the argument is a number and greater than or equal to 0 class Number : public Validator { public: Number() : Validator("NUMBER") { func_ = [](std::string & number_str) { double number; if (!detail::lexical_cast(number_str, number)) { return "Failed parsing as a number " + number_str; } return std::string(); }; } }; } // namespace detail // Static is not needed here, because global const implies static. /// Check for existing file (returns error message if check fails) const detail::ExistingFileValidator ExistingFile; /// Check for an existing directory (returns error message if check fails) const detail::ExistingDirectoryValidator ExistingDirectory; /// Check for an existing path const detail::ExistingPathValidator ExistingPath; /// Check for an non-existing path const detail::NonexistentPathValidator NonexistentPath; /// Check for an IP4 address const detail::IPV4Validator ValidIPV4; /// Check for a positive number const detail::PositiveNumber PositiveNumber; /// Check for a number const detail::Number Number; /// Produce a range (factory). Min and max are inclusive. class Range : public Validator { public: /// This produces a range with min and max inclusive. 
/// /// Note that the constructor is templated, but the struct is not, so C++17 is not /// needed to provide nice syntax for Range(a,b). template Range(T min, T max) { std::stringstream out; out << detail::type_name() << " in [" << min << " - " << max << "]"; description(out.str()); func_ = [min, max](std::string & input) { T val; bool converted = detail::lexical_cast(input, val); if ((!converted) || (val < min || val > max)) { return "Value " + input + " not in range " + std::to_string( min) + " to " + std::to_string(max); } return std::string(); }; } /// Range of one value is 0 to value template explicit Range(T max) : Range(static_cast(0), max) {} }; /// Produce a bounded range (factory). Min and max are inclusive. class Bound : public Validator { public: /// This bounds a value with min and max inclusive. /// /// Note that the constructor is templated, but the struct is not, so C++17 is not /// needed to provide nice syntax for Range(a,b). template Bound(T min, T max) { std::stringstream out; out << detail::type_name() << " bounded to [" << min << " - " << max << "]"; description(out.str()); func_ = [min, max](std::string & input) { T val; bool converted = detail::lexical_cast(input, val); if (!converted) { return "Value " + input + " could not be converted"; } if (val < min) { input = detail::as_string(min); } else if (val > max) { input = detail::as_string(max); } return std::string(); }; } /// Range of one value is 0 to value template explicit Bound(T max) : Bound(static_cast(0), max) {} }; namespace detail { template ::type>::value, detail::enabler> = detail::dummy> auto smart_deref(T value) -> decltype(*value) { return *value; } template < typename T, enable_if_t < !is_copyable_ptr::type>::value, detail::enabler > = detail::dummy > typename std::remove_reference::type & smart_deref(T& value) { return value; } /// Generate a string representation of a set template std::string generate_set(const T& set) { using element_t = typename 
detail::element_type::type; using iteration_type_t = typename detail::pair_adaptor::value_type; // the type of the object pair std::string out(1, '{'); out.append(detail::join(detail::smart_deref(set), [](const iteration_type_t& v) { return detail::pair_adaptor::first(v); }, ",")); out.push_back('}'); return out; } /// Generate a string representation of a map template std::string generate_map(const T& map, bool key_only = false) { using element_t = typename detail::element_type::type; using iteration_type_t = typename detail::pair_adaptor::value_type; // the type of the object pair std::string out(1, '{'); out.append(detail::join(detail::smart_deref(map), [key_only](const iteration_type_t& v) { auto res = detail::as_string(detail::pair_adaptor::first(v)); if (!key_only) { res += "->" + detail::as_string(detail::pair_adaptor::second(v)); } return res; }, ",")); out.push_back('}'); return out; } template struct sfinae_true : std::true_type {}; /// Function to check for the existence of a member find function which presumably is more efficient than looping over /// everything template static auto test_find(int) -> sfinae_true().find(std::declval()))>; template static auto test_find(long) -> std::false_type; template struct has_find : decltype(test_find (0)) {}; /// A search function template < typename T, typename V, enable_if_t < !has_find::value, detail::enabler > = detail::dummy > auto search(const T& set, const V& val) -> std::pair { using element_t = typename detail::element_type::type; auto& setref = detail::smart_deref(set); auto it = std::find_if(std::begin(setref), std::end(setref), [&val](decltype(*std::begin(setref)) v) { return (detail::pair_adaptor::first(v) == val); }); return {(it != std::end(setref)), it}; } /// A search function that uses the built in find function template ::value, detail::enabler> = detail::dummy> auto search(const T& set, const V& val) -> std::pair { auto& setref = detail::smart_deref(set); auto it = setref.find(val); return {(it 
// NOTE(review): corrupted CLI11 fragment (collapsed lines, stripped template
// argument lists; the leading "!= std::end(setref)), it}; }" tokens complete
// the member-find search() overload opened on the previous physical line, and
// the trailing Transformer constructor runs past this extract). Restore from
// upstream CLI11.
// Contents: the filtered search() overload (fast search first, then a linear
// scan applying filter_function to each element before comparing);
// checked_multiply for integers (a *= b with overflow detection via c / a != b)
// and for floats (rejects products that newly become infinite); the IsMember
// validator (converts input via lexical_cast — throwing ValidationError on
// conversion failure — optionally filters it, searches the stored set, rewrites
// the input to the canonical set entry when a filter was used, and returns
// " not in {…}" on miss; extra filter functions nest via the variadic ctor);
// and the beginning of Transformer (maps named inputs to replacement values
// using the same machinery; static_asserts that the mapping yields pairs).
!= std::end(setref)), it}; } /// A search function with a filter function template auto search(const T& set, const V& val, const std::function& filter_function) -> std::pair { using element_t = typename detail::element_type::type; // do the potentially faster first search auto res = search(set, val); if ((res.first) || (!(filter_function))) { return res; } // if we haven't found it do the longer linear search with all the element translations auto& setref = detail::smart_deref(set); auto it = std::find_if(std::begin(setref), std::end(setref), [&](decltype(*std::begin(setref)) v) { V a = detail::pair_adaptor::first(v); a = filter_function(a); return (a == val); }); return {(it != std::end(setref)), it}; } /// Performs a *= b; if it doesn't cause integer overflow. Returns false otherwise. template typename std::enable_if::value, bool>::type checked_multiply(T& a, T b) { if (a == 0 || b == 0) { a *= b; return true; } T c = a * b; if (c / a != b) { return false; } a = c; return true; } /// Performs a *= b; if it doesn't equal infinity. Returns false otherwise. template typename std::enable_if::value, bool>::type checked_multiply(T& a, T b) { T c = a * b; if (std::isinf(c) && !std::isinf(a) && !std::isinf(b)) { return false; } a = c; return true; } } // namespace detail /// Verify items are in a set class IsMember : public Validator { public: using filter_fn_t = std::function; /// This allows in-place construction using an initializer list template explicit IsMember(std::initializer_list values, Args&& ... args) : IsMember(std::vector(values), std::forward(args)...) {} /// This checks to see if an item is in a set (empty function) template explicit IsMember(T&& set) : IsMember(std::forward (set), nullptr) {} /// This checks to see if an item is in a set: pointer or copy version. You can pass in a function that will filter /// both sides of the comparison before computing the comparison. 
template explicit IsMember(T set, F filter_function) { // Get the type of the contained item - requires a container have ::value_type // if the type does not have first_type and second_type, these are both value_type using element_t = typename detail::element_type::type; // Removes (smart) pointers if needed using item_t = typename detail::pair_adaptor::first_type; // Is value_type if not a map using local_item_t = typename IsMemberType::type; // This will convert bad types to good ones // (const char * to std::string) // Make a local copy of the filter function, using a std::function if not one already std::function filter_fn = filter_function; // This is the type name for help, it will take the current version of the set contents desc_function_ = [set]() { return detail::generate_set(detail::smart_deref(set)); }; // This is the function that validates // It stores a copy of the set pointer-like, so shared_ptr will stay alive func_ = [set, filter_fn](std::string & input) { local_item_t b; if (!detail::lexical_cast(input, b)) { throw ValidationError(input); // name is added later } if (filter_fn) { b = filter_fn(b); } auto res = detail::search(set, b, filter_fn); if (res.first) { // Make sure the version in the input string is identical to the one in the set if (filter_fn) { input = detail::as_string(detail::pair_adaptor::first(* (res.second))); } // Return empty error string (success) return std::string{}; } // If you reach this point, the result was not found std::string out(" not in "); out += detail::generate_set(detail::smart_deref(set)); return out; }; } /// You can pass in as many filter functions as you like, they nest (string only currently) template IsMember(T&& set, filter_fn_t filter_fn_1, filter_fn_t filter_fn_2, Args&& ... other) : IsMember(std::forward(set), [filter_fn_1, filter_fn_2](std::string a) { return filter_fn_2(filter_fn_1(a)); }, other...) 
{} }; /// definition of the default transformation object template using TransformPairs = std::vector>; /// Translate named items to other or a value set class Transformer : public Validator { public: using filter_fn_t = std::function; /// This allows in-place construction template explicit Transformer(std::initializer_list> values, Args&& ... args) : Transformer(TransformPairs(values), std::forward(args)...) {} /// direct map of std::string to std::string template explicit Transformer(T&& mapping) : Transformer( std::forward(mapping), nullptr) {} /// This checks to see if an item is in a set: pointer or copy version. You can pass in a function that will filter /// both sides of the comparison before computing the comparison. template explicit Transformer(T mapping, F filter_function) { static_assert( detail::pair_adaptor::type>::value, "mapping must produce value pairs"); // Get the type of the contained item - requires a container have ::value_type // if the type does not have first_type and second_type, these are both value_type using element_t = typename detail::element_type::type; // Removes (smart) pointers if needed using item_t = typename detail::pair_adaptor::first_type; // Is value_type if not a map using local_item_t = typename IsMemberType::type; // This will convert bad types to good ones // (const char * to std::string) // Make a local copy of the filter function, using a std::function if not one already std::function filter_fn = filter_function; // This is the type name for help, it will take the current version of the set contents desc_function_ = [mapping]() { return detail::generate_map(detail::smart_deref(mapping)); }; func_ = [mapping, filter_fn](std::string & input) { local_item_t b; if (!detail::lexical_cast(input, b)) { return std::string(); // there is no possible way we can match anything in the mapping if we can't convert so just return } if (filter_fn) { b = filter_fn(b); } auto res = detail::search(mapping, b, filter_fn); if (res.first) 
{ input = detail::as_string(detail::pair_adaptor::second(*res.second)); } return std::string{}; }; } /// You can pass in as many filter functions as you like, they nest template Transformer(T&& mapping, filter_fn_t filter_fn_1, filter_fn_t filter_fn_2, Args&& ... other) : Transformer(std::forward(mapping), [filter_fn_1, filter_fn_2](std::string a) { return filter_fn_2(filter_fn_1(a)); }, other...) {} }; /// translate named items to other or a value set class CheckedTransformer : public Validator { public: using filter_fn_t = std::function; /// This allows in-place construction template explicit CheckedTransformer( std::initializer_list> values, Args&& ... args) : CheckedTransformer(TransformPairs(values), std::forward(args)...) {} /// direct map of std::string to std::string template explicit CheckedTransformer(T mapping) : CheckedTransformer(std::move(mapping), nullptr) {} /// This checks to see if an item is in a set: pointer or copy version. You can pass in a function that will filter /// both sides of the comparison before computing the comparison. 
template explicit CheckedTransformer(T mapping, F filter_function) { static_assert( detail::pair_adaptor::type>::value, "mapping must produce value pairs"); // Get the type of the contained item - requires a container have ::value_type // if the type does not have first_type and second_type, these are both value_type using element_t = typename detail::element_type::type; // Removes (smart) pointers if needed using item_t = typename detail::pair_adaptor::first_type; // Is value_type if not a map using local_item_t = typename IsMemberType::type; // This will convert bad types to good ones // (const char * to std::string) using iteration_type_t = typename detail::pair_adaptor::value_type; // the type of the object pair // // the type of the object pair // Make a local copy of the filter function, using a std::function if not one already std::function filter_fn = filter_function; auto tfunc = [mapping]() { std::string out("value in "); out += detail::generate_map(detail::smart_deref(mapping)) + " OR {"; out += detail::join( detail::smart_deref(mapping), [](const iteration_type_t& v) { return detail::as_string(detail::pair_adaptor::second(v)); }, ","); out.push_back('}'); return out; }; desc_function_ = tfunc; func_ = [mapping, tfunc, filter_fn](std::string & input) { local_item_t b; bool converted = detail::lexical_cast(input, b); if (converted) { if (filter_fn) { b = filter_fn(b); } auto res = detail::search(mapping, b, filter_fn); if (res.first) { input = detail::as_string(detail::pair_adaptor::second(*res.second)); return std::string{}; } } for (const auto& v : detail::smart_deref(mapping)) { auto output_string = detail::as_string(detail::pair_adaptor::second( v)); if (output_string == input) { return std::string(); } } return "Check " + input + " " + tfunc() + " FAILED"; }; } /// You can pass in as many filter functions as you like, they nest template CheckedTransformer(T&& mapping, filter_fn_t filter_fn_1, filter_fn_t filter_fn_2, Args&& ... 
other) : CheckedTransformer(std::forward(mapping), [filter_fn_1, filter_fn_2](std::string a) { return filter_fn_2(filter_fn_1(a)); }, other...) {} }; /// Helper function to allow ignore_case to be passed to IsMember or Transform inline std::string ignore_case(std::string item) { return detail::to_lower(item); } /// Helper function to allow ignore_underscore to be passed to IsMember or Transform inline std::string ignore_underscore(std::string item) { return detail::remove_underscore(item); } /// Helper function to allow checks to ignore spaces to be passed to IsMember or Transform inline std::string ignore_space(std::string item) { item.erase(std::remove(std::begin(item), std::end(item), ' '), std::end(item)); item.erase(std::remove(std::begin(item), std::end(item), '\t'), std::end(item)); return item; } /// Multiply a number by a factor using given mapping. /// Can be used to write transforms for SIZE or DURATION inputs. /// /// Example: /// With mapping = `{"b"->1, "kb"->1024, "mb"->1024*1024}` /// one can recognize inputs like "100", "12kb", "100 MB", /// that will be automatically transformed to 100, 14448, 104857600. /// /// Output number type matches the type in the provided mapping. /// Therefore, if it is required to interpret real inputs like "0.42 s", /// the mapping should be of a type or . class AsNumberWithUnit : public Validator { public: /// Adjust AsNumberWithUnit behavior. /// CASE_SENSITIVE/CASE_INSENSITIVE controls how units are matched. /// UNIT_OPTIONAL/UNIT_REQUIRED throws ValidationError /// if UNIT_REQUIRED is set and unit literal is not found. 
enum Options { CASE_SENSITIVE = 0, CASE_INSENSITIVE = 1, UNIT_OPTIONAL = 0, UNIT_REQUIRED = 2, DEFAULT = CASE_INSENSITIVE | UNIT_OPTIONAL }; template explicit AsNumberWithUnit(std::map mapping, Options opts = DEFAULT, const std::string& unit_name = "UNIT") { description(generate_description(unit_name, opts)); validate_mapping(mapping, opts); // transform function func_ = [mapping, opts](std::string & input) -> std::string { Number num; detail::rtrim(input); if (input.empty()) { throw ValidationError("Input is empty"); } // Find split position between number and prefix auto unit_begin = input.end(); while (unit_begin > input.begin() && std::isalpha(*(unit_begin - 1), std::locale())) { --unit_begin; } std::string unit{unit_begin, input.end()}; input.resize(static_cast(std::distance(input.begin(), unit_begin))); detail::trim(input); if (opts & UNIT_REQUIRED && unit.empty()) { throw ValidationError("Missing mandatory unit"); } if (opts & CASE_INSENSITIVE) { unit = detail::to_lower(unit); } bool converted = detail::lexical_cast(input, num); if (!converted) { throw ValidationError("Value " + input + " could not be converted to " + detail::type_name()); } if (unit.empty()) { // No need to modify input if no unit passed return {}; } // find corresponding factor auto it = mapping.find(unit); if (it == mapping.end()) { throw ValidationError(unit + " unit not recognized. " "Allowed values: " + detail::generate_map(mapping, true)); } // perform safe multiplication bool ok = detail::checked_multiply(num, it->second); if (!ok) { throw ValidationError(detail::as_string(num) + " multiplied by " + unit + " factor would cause number overflow. Use smaller value."); } input = detail::as_string(num); return {}; }; } private: /// Check that mapping contains valid units. /// Update mapping for CASE_INSENSITIVE mode. 
template static void validate_mapping( std::map& mapping, Options opts) { for (auto& kv : mapping) { if (kv.first.empty()) { throw ValidationError("Unit must not be empty."); } if (!detail::isalpha(kv.first)) { throw ValidationError("Unit must contain only letters."); } } // make all units lowercase if CASE_INSENSITIVE if (opts & CASE_INSENSITIVE) { std::map lower_mapping; for (auto& kv : mapping) { auto s = detail::to_lower(kv.first); if (lower_mapping.count(s)) { throw ValidationError("Several matching lowercase unit representations are found: " + s); } lower_mapping[detail::to_lower(kv.first)] = kv.second; } mapping = std::move(lower_mapping); } } /// Generate description like this: NUMBER [UNIT] template static std::string generate_description( const std::string& name, Options opts) { std::stringstream out; out << detail::type_name() << ' '; if (opts & UNIT_REQUIRED) { out << name; } else { out << '[' << name << ']'; } return out.str(); } }; /// Converts a human-readable size string (with unit literal) to uin64_t size. /// Example: /// "100" => 100 /// "1 b" => 100 /// "10Kb" => 10240 // you can configure this to be interpreted as kilobyte (*1000) or kibibyte (*1024) /// "10 KB" => 10240 /// "10 kb" => 10240 /// "10 kib" => 10240 // *i, *ib are always interpreted as *bibyte (*1024) /// "10kb" => 10240 /// "2 MB" => 2097152 /// "2 EiB" => 2^61 // Units up to exibyte are supported class AsSizeValue : public AsNumberWithUnit { public: using result_t = uint64_t; /// If kb_is_1000 is true, /// interpret 'kb', 'k' as 1000 and 'kib', 'ki' as 1024 /// (same applies to higher order units as well). /// Otherwise, interpret all literals as factors of 1024. /// The first option is formally correct, but /// the second interpretation is more wide-spread /// (see https://en.wikipedia.org/wiki/Binary_prefix). 
explicit AsSizeValue(bool kb_is_1000) : AsNumberWithUnit(get_mapping( kb_is_1000)) { if (kb_is_1000) { description("SIZE [b, kb(=1000b), kib(=1024b), ...]"); } else { description("SIZE [b, kb(=1024b), ...]"); } } private: /// Get mapping static std::map init_mapping(bool kb_is_1000) { std::map m; result_t k_factor = kb_is_1000 ? 1000 : 1024; result_t ki_factor = 1024; result_t k = 1; result_t ki = 1; m["b"] = 1; for (std::string p : { "k", "m", "g", "t", "p", "e" }) { k *= k_factor; ki *= ki_factor; m[p] = k; m[p + "b"] = k; m[p + "i"] = ki; m[p + "ib"] = ki; } return m; } /// Cache calculated mapping static std::map get_mapping(bool kb_is_1000) { if (kb_is_1000) { static auto m = init_mapping(true); return m; } else { static auto m = init_mapping(false); return m; } } }; namespace detail { /// Split a string into a program name and command line arguments /// the string is assumed to contain a file name followed by other arguments /// the return value contains is a pair with the first argument containing the program name and the second /// everything else. inline std::pair split_program_name( std::string commandline) { // try to determine the programName std::pair vals; trim(commandline); auto esp = commandline.find_first_of(' ', 1); while (!ExistingFile(commandline.substr(0, esp)).empty()) { esp = commandline.find_first_of(' ', esp + 1); if (esp == std::string::npos) { // if we have reached the end and haven't found a valid file just assume the first argument is the // program name esp = commandline.find_first_of(' ', 1); break; } } vals.first = commandline.substr(0, esp); rtrim(vals.first); // strip the program name vals.second = (esp != std::string::npos) ? 
commandline.substr( esp + 1) : std::string{}; ltrim(vals.second); return vals; } } // namespace detail /// @} } // namespace CLI // From CLI/FormatterFwd.hpp: namespace CLI { class Option; class App; /// This enum signifies the type of help requested /// /// This is passed in by App; all user classes must accept this as /// the second argument. enum class AppFormatMode { Normal, //< The normal, detailed help All, //< A fully expanded help Sub, //< Used when printed as part of expanded subcommand }; /// This is the minimum requirements to run a formatter. /// /// A user can subclass this is if they do not care at all /// about the structure in CLI::Formatter. class FormatterBase { protected: /// @name Options ///@{ /// The width of the first column size_t column_width_{30}; /// @brief The required help printout labels (user changeable) /// Values are Needs, Excludes, etc. std::map labels_; ///@} /// @name Basic ///@{ public: FormatterBase() = default; FormatterBase(const FormatterBase&) = default; FormatterBase(FormatterBase&&) = default; /// Adding a destructor in this form to work around bug in GCC 4.7 virtual ~FormatterBase() noexcept {} // NOLINT(modernize-use-equals-default) /// This is the key method that puts together help virtual std::string make_help(const App*, std::string, AppFormatMode) const = 0; ///@} /// @name Setters ///@{ /// Set the "REQUIRED" label void label(std::string key, std::string val) { labels_[key] = val; } /// Set the column width void column_width(size_t val) { column_width_ = val; } ///@} /// @name Getters ///@{ /// Get the current value of a name (REQUIRED, etc.) 
std::string get_label(std::string key) const { if (labels_.find(key) == labels_.end()) { return key; } else { return labels_.at(key); } } /// Get the current column width size_t get_column_width() const { return column_width_; } ///@} }; /// This is a specialty override for lambda functions class FormatterLambda final : public FormatterBase { using funct_t = std::function; /// The lambda to hold and run funct_t lambda_; public: /// Create a FormatterLambda with a lambda function explicit FormatterLambda(funct_t funct) : lambda_(std::move(funct)) {} /// Adding a destructor (mostly to make GCC 4.7 happy) ~FormatterLambda() noexcept override {} // NOLINT(modernize-use-equals-default) /// This will simply call the lambda function std::string make_help(const App* app, std::string name, AppFormatMode mode) const override { return lambda_(app, name, mode); } }; /// This is the default Formatter for CLI11. It pretty prints help output, and is broken into quite a few /// overridable methods, to be highly customizable with minimal effort. 
class Formatter : public FormatterBase { public: Formatter() = default; Formatter(const Formatter&) = default; Formatter(Formatter&&) = default; /// @name Overridables ///@{ /// This prints out a group of options with title /// virtual std::string make_group(std::string group, bool is_positional, std::vector opts) const; /// This prints out just the positionals "group" virtual std::string make_positionals(const App* app) const; /// This prints out all the groups of options std::string make_groups(const App* app, AppFormatMode mode) const; /// This prints out all the subcommands virtual std::string make_subcommands(const App* app, AppFormatMode mode) const; /// This prints out a subcommand virtual std::string make_subcommand(const App* sub) const; /// This prints out a subcommand in help-all virtual std::string make_expanded(const App* sub) const; /// This prints out all the groups of options virtual std::string make_footer(const App* app) const; /// This displays the description line virtual std::string make_description(const App* app) const; /// This displays the usage line virtual std::string make_usage(const App* app, std::string name) const; /// This puts everything together std::string make_help(const App*, std::string, AppFormatMode) const override; ///@} /// @name Options ///@{ /// This prints out an option help line, either positional or optional form virtual std::string make_option(const Option* opt, bool is_positional) const { std::stringstream out; detail::format_help( out, make_option_name(opt, is_positional) + make_option_opts(opt), make_option_desc(opt), column_width_); return out.str(); } /// @brief This is the name part of an option, Default: left column virtual std::string make_option_name(const Option*, bool) const; /// @brief This is the options part of the name, Default: combined into left column virtual std::string make_option_opts(const Option*) const; /// @brief This is the description. 
Default: Right column, on new line if left column too large virtual std::string make_option_desc(const Option*) const; /// @brief This is used to print the name on the USAGE line virtual std::string make_option_usage(const Option* opt) const; ///@} }; } // namespace CLI // From CLI/Option.hpp: namespace CLI { using results_t = std::vector; using callback_t = std::function; class Option; class App; using Option_p = std::unique_ptr